Dataset columns:
- in_source_id: string (length 13 to 58)
- issue: string (length 3 to 241k)
- before_files: list (0 to 3 items)
- after_files: list (0 to 3 items)
- pr_diff: string (length 109 to 107M)
mit-ll-responsible-ai__hydra-zen-355
`validates_with_beartype` considers `Partial` as `NoneType`

Hi @rsokl. I was having a blast using this fascinating library. But it seems that, when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`.

```python
from hydra_zen.typing import Partial
from hydra_zen.third_party.beartype import validates_with_beartype


def f(x: Partial[list]):
    return x


val_f = validates_with_beartype(f)
val_f(3)
```

It raises the following error. Can you take a look?

```bash
beartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f() parameter x=3 violates type hint None, as int 3 not instance of <class "builtins.NoneType">.
```
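The failure mode in the report can be reproduced without beartype or hydra-zen. The sketch below is not from the report; `BrokenPartial` is a made-up stand-in mirroring how `hydra_zen.typing.Partial` was defined before the fix: a `__class_getitem__` stub whose body is `...` returns `None` at runtime, so the parameterized annotation itself evaluates to `None`, which the typing machinery treats as `NoneType`.

```python
# Minimal sketch (assumes Python 3.9+, matching the report); "BrokenPartial" is a
# hypothetical stand-in, not hydra-zen code.
from typing import Any, Protocol, TypeVar

T = TypeVar("T", covariant=True)


class BrokenPartial(Protocol[T]):
    # Stub intended only for static type checkers; at runtime its body is "...",
    # so subscripting the class returns None instead of a parameterized alias.
    def __class_getitem__(cls, item: Any) -> Any:
        ...


print(BrokenPartial[list])  # None -> an annotation of BrokenPartial[list] becomes NoneType
```

With the annotation collapsing to `None`, beartype correctly rejects `3` against `NoneType`, which matches the traceback in the report.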
[ { "content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Generic,\n List,\n Mapping,\n NewType,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import (\n Final,\n Literal,\n ParamSpec,\n Protocol,\n Self,\n TypeAlias,\n TypedDict,\n runtime_checkable,\n)\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n \"ZenWrappers\",\n \"ZenPartialBuilds\",\n \"HydraPartialBuilds\",\n \"ZenConvert\",\n]\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\nclass EmptyDict(TypedDict):\n pass\n\n\nT = TypeVar(\"T\", covariant=True)\nT2 = TypeVar(\"T2\")\nT3 = TypeVar(\"T3\")\n\nT4 = TypeVar(\"T4\", bound=Callable[..., Any])\n\n\nInstOrType: TypeAlias = Union[T, Type[T]]\n\n\nif TYPE_CHECKING:\n from dataclasses import Field # provided by typestub but not generic at runtime\nelse:\n\n class Field(Protocol[T2]):\n name: str\n type: Type[T2]\n default: T2\n default_factory: Callable[[], T2]\n repr: bool\n hash: Optional[bool]\n init: bool\n compare: bool\n metadata: Mapping[str, Any]\n\n\n@runtime_checkable\nclass Partial(Protocol[T2]):\n __call__: Callable[..., T2]\n\n @property\n def func(self) -> Callable[..., T2]:\n ...\n\n @property\n def args(self) -> Tuple[Any, ...]:\n ...\n\n @property\n def keywords(self) -> Dict[str, Any]:\n ...\n\n def __new__(\n cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any\n ) -> Self:\n ...\n\n if sys.version_info >= (3, 9): # pragma: no cover\n\n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\n\nclass DataClass_(Protocol):\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]\n\n\nclass DataClass(DataClass_, Protocol):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n ...\n\n def __getattribute__(self, __name: str) -> Any:\n ...\n\n def __setattr__(self, __name: str, __value: Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[T]):\n _target_: ClassVar[str]\n\n\nclass BuildsWithSig(Builds[T], Protocol[T, P]):\n def __init__(self, *args: P.args, **kwds: P.kwargs):\n ...\n\n\n@runtime_checkable\nclass Just(Builds[T], Protocol[T]):\n path: ClassVar[str] # interpolated string for importing obj\n _target_: ClassVar[Literal[\"hydra_zen.funcs.get_obj\"]] = \"hydra_zen.funcs.get_obj\"\n\n\nclass ZenPartialMixin(Protocol[T]):\n _zen_target: ClassVar[str]\n _zen_partial: ClassVar[Literal[True]] = True\n\n\nclass HydraPartialMixin(Protocol[T]):\n _partial_: ClassVar[Literal[True]] = True\n\n\n@runtime_checkable\nclass ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):\n _target_: ClassVar[\n Literal[\"hydra_zen.funcs.zen_processing\"]\n ] = \"hydra_zen.funcs.zen_processing\"\n\n\n@runtime_checkable\nclass HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):\n ...\n\n\n# Necessary, but not sufficient, check for PartialBuilds; useful for creating\n# non-overlapping overloads\nIsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]\n\nPartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]]\n\n\n@runtime_checkable\nclass 
HasTarget(Protocol):\n _target_: str\n\n\nImportable = TypeVar(\"Importable\", bound=Callable[..., Any])\n\n_HydraPrimitive: TypeAlias = Union[\n bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath\n]\n\n_SupportedViaBuilds = Union[\n Partial[Any],\n range,\n Set[Any],\n]\n\n_SupportedPrimitive: TypeAlias = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n Callable[..., Any],\n Enum,\n DataClass_,\n complex,\n _SupportedViaBuilds,\n EmptyDict, # not covered by Mapping[..., ...]]\n]\n\nif TYPE_CHECKING:\n SupportedPrimitive: TypeAlias = Union[\n _SupportedPrimitive,\n FrozenSet[\"SupportedPrimitive\"],\n # Even thought this is redundant with Sequence, it seems to\n # be needed for pyright to do proper checking of tuple contents\n Tuple[\"SupportedPrimitive\", ...],\n # Mutable generic containers need to be invariant, so\n # we have to settle for Sequence/Mapping. While this\n # is overly permissive in terms of sequence-type, it\n # at least affords quality checking of sequence content\n Sequence[\"SupportedPrimitive\"],\n # Mapping is covariant only in value\n Mapping[Any, \"SupportedPrimitive\"],\n ]\nelse:\n # cleans up annotations for REPLs\n SupportedPrimitive = TypeVar(\"SupportedPrimitive\")\n\n\nZenWrapper: TypeAlias = Union[\n None,\n Builds[Callable[[T4], T4]],\n PartialBuilds[Callable[[T4], T4]],\n Just[Callable[[T4], T4]],\n Type[Builds[Callable[[T4], T4]]],\n Type[PartialBuilds[Callable[[T4], T4]]],\n Type[Just[Callable[[T4], T4]]],\n Callable[[T4], T4],\n str,\n]\nif TYPE_CHECKING:\n ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]\nelse:\n # cleans up annotations for REPLs\n class ZenWrappers(Generic[T2]): # pragma: no cover\n pass\n\n\nDefaultsList = List[\n Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]\n]\n\n\n# Lists all zen-convert settings and their types. Not part of public API\nclass AllConvert(TypedDict, total=True):\n dataclass: bool\n\n\n# used for runtime type-checking\nconvert_types: Final = {\"dataclass\": bool}\n\nGroupName: TypeAlias = Optional[str]\nNodeName: TypeAlias = str\nNode: TypeAlias = Any\n\n\n# TODO: make immutable\nclass StoreEntry(TypedDict):\n name: NodeName\n group: GroupName\n package: Optional[str]\n provider: Optional[str]\n node: Node\n\n\nclass ZenConvert(TypedDict, total=False):\n \"\"\"A TypedDict that provides a type-checked interface for specifying zen-convert\n options that configure the hydra-zen config-creation functions (e.g., `builds`,\n `just`, and `make_config`).\n\n Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.\n\n Parameters\n ----------\n dataclass : bool\n If `True` any dataclass type/instance without a `_target_` field is\n automatically converted to a targeted config that will instantiate to that type/\n instance. Otherwise the dataclass type/instance will be passed through as-is.\n\n Note that this only works with statically-defined dataclass types, whereas\n :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`\n dynamically generate dataclass types. 
Additionally, this feature is not\n compatible with a dataclass instance whose type possesses an `InitVar` field.\n\n Examples\n --------\n >>> from hydra_zen.typing import ZenConvert as zc\n >>> zc()\n {}\n >>> zc(dataclass=True)\n {\"dataclass\": True}\n >>> # static type-checker will raise, but runtime will not\n >>> zc(apple=1) # type: ignore\n {\"apple\": 1}\n\n **Configuring dataclass auto-config behaviors**\n\n >>> from hydra_zen import instantiate as I\n >>> from hydra_zen import builds, just\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class B:\n ... x: int\n >>> b = B(x=1)\n\n >>> I(just(b))\n B(x=1)\n >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {\"x\": 1}\n\n >>> I(builds(dict, y=b))\n {'y': B(x=1)}\n >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n\n >>> I(make_config(y=b)) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert=\"all\"))\n {'y': B(x=1)}\n\n Auto-config support does not work with dynamically-generated dataclass types\n\n >>> just(make_config(z=1))\n HydraZenUnsupportedPrimitiveError: ...\n >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))\n {'z': 1}\n\n A dataclass with a `_target_` field will not be converted:\n\n >>> @dataclass\n ... class BuildsStr:\n ... _target_: str = 'builtins.str'\n ...\n >>> BuildsStr is just(BuildsStr)\n True\n >>> (builds_str := BuildsStr()) is just(builds_str)\n True\n \"\"\"\n\n dataclass: bool\n", "path": "src/hydra_zen/typing/_implementations.py" } ]
[ { "content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Generic,\n List,\n Mapping,\n NewType,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import (\n Final,\n Literal,\n ParamSpec,\n Protocol,\n Self,\n TypeAlias,\n TypedDict,\n runtime_checkable,\n)\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n \"ZenWrappers\",\n \"ZenPartialBuilds\",\n \"HydraPartialBuilds\",\n \"ZenConvert\",\n]\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\nclass EmptyDict(TypedDict):\n pass\n\n\nT = TypeVar(\"T\", covariant=True)\nT2 = TypeVar(\"T2\")\nT3 = TypeVar(\"T3\")\n\nT4 = TypeVar(\"T4\", bound=Callable[..., Any])\n\n\nInstOrType: TypeAlias = Union[T, Type[T]]\n\n\nif TYPE_CHECKING:\n from dataclasses import Field # provided by typestub but not generic at runtime\nelse:\n\n class Field(Protocol[T2]):\n name: str\n type: Type[T2]\n default: T2\n default_factory: Callable[[], T2]\n repr: bool\n hash: Optional[bool]\n init: bool\n compare: bool\n metadata: Mapping[str, Any]\n\n\n@runtime_checkable\nclass Partial(Protocol[T2]):\n __call__: Callable[..., T2]\n\n @property\n def func(self) -> Callable[..., T2]:\n ...\n\n @property\n def args(self) -> Tuple[Any, ...]:\n ...\n\n @property\n def keywords(self) -> Dict[str, Any]:\n ...\n\n def __new__(\n cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any\n ) -> Self:\n ...\n\n if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover\n\n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\n\nclass DataClass_(Protocol):\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]\n\n\nclass DataClass(DataClass_, Protocol):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n ...\n\n def __getattribute__(self, __name: str) -> Any:\n ...\n\n def __setattr__(self, __name: str, __value: Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[T]):\n _target_: ClassVar[str]\n\n\nclass BuildsWithSig(Builds[T], Protocol[T, P]):\n def __init__(self, *args: P.args, **kwds: P.kwargs):\n ...\n\n\n@runtime_checkable\nclass Just(Builds[T], Protocol[T]):\n path: ClassVar[str] # interpolated string for importing obj\n _target_: ClassVar[Literal[\"hydra_zen.funcs.get_obj\"]] = \"hydra_zen.funcs.get_obj\"\n\n\nclass ZenPartialMixin(Protocol[T]):\n _zen_target: ClassVar[str]\n _zen_partial: ClassVar[Literal[True]] = True\n\n\nclass HydraPartialMixin(Protocol[T]):\n _partial_: ClassVar[Literal[True]] = True\n\n\n@runtime_checkable\nclass ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):\n _target_: ClassVar[\n Literal[\"hydra_zen.funcs.zen_processing\"]\n ] = \"hydra_zen.funcs.zen_processing\"\n\n\n@runtime_checkable\nclass HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):\n ...\n\n\n# Necessary, but not sufficient, check for PartialBuilds; useful for creating\n# non-overlapping overloads\nIsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]\n\nPartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], 
HydraPartialBuilds[T]]\n\n\n@runtime_checkable\nclass HasTarget(Protocol):\n _target_: str\n\n\nImportable = TypeVar(\"Importable\", bound=Callable[..., Any])\n\n_HydraPrimitive: TypeAlias = Union[\n bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath\n]\n\n_SupportedViaBuilds = Union[\n Partial[Any],\n range,\n Set[Any],\n]\n\n_SupportedPrimitive: TypeAlias = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n Callable[..., Any],\n Enum,\n DataClass_,\n complex,\n _SupportedViaBuilds,\n EmptyDict, # not covered by Mapping[..., ...]]\n]\n\nif TYPE_CHECKING:\n SupportedPrimitive: TypeAlias = Union[\n _SupportedPrimitive,\n FrozenSet[\"SupportedPrimitive\"],\n # Even thought this is redundant with Sequence, it seems to\n # be needed for pyright to do proper checking of tuple contents\n Tuple[\"SupportedPrimitive\", ...],\n # Mutable generic containers need to be invariant, so\n # we have to settle for Sequence/Mapping. While this\n # is overly permissive in terms of sequence-type, it\n # at least affords quality checking of sequence content\n Sequence[\"SupportedPrimitive\"],\n # Mapping is covariant only in value\n Mapping[Any, \"SupportedPrimitive\"],\n ]\nelse:\n # cleans up annotations for REPLs\n SupportedPrimitive = TypeVar(\"SupportedPrimitive\")\n\n\nZenWrapper: TypeAlias = Union[\n None,\n Builds[Callable[[T4], T4]],\n PartialBuilds[Callable[[T4], T4]],\n Just[Callable[[T4], T4]],\n Type[Builds[Callable[[T4], T4]]],\n Type[PartialBuilds[Callable[[T4], T4]]],\n Type[Just[Callable[[T4], T4]]],\n Callable[[T4], T4],\n str,\n]\nif TYPE_CHECKING:\n ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]\nelse:\n # cleans up annotations for REPLs\n class ZenWrappers(Generic[T2]): # pragma: no cover\n pass\n\n\nDefaultsList = List[\n Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]\n]\n\n\n# Lists all zen-convert settings and their types. Not part of public API\nclass AllConvert(TypedDict, total=True):\n dataclass: bool\n\n\n# used for runtime type-checking\nconvert_types: Final = {\"dataclass\": bool}\n\nGroupName: TypeAlias = Optional[str]\nNodeName: TypeAlias = str\nNode: TypeAlias = Any\n\n\n# TODO: make immutable\nclass StoreEntry(TypedDict):\n name: NodeName\n group: GroupName\n package: Optional[str]\n provider: Optional[str]\n node: Node\n\n\nclass ZenConvert(TypedDict, total=False):\n \"\"\"A TypedDict that provides a type-checked interface for specifying zen-convert\n options that configure the hydra-zen config-creation functions (e.g., `builds`,\n `just`, and `make_config`).\n\n Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.\n\n Parameters\n ----------\n dataclass : bool\n If `True` any dataclass type/instance without a `_target_` field is\n automatically converted to a targeted config that will instantiate to that type/\n instance. Otherwise the dataclass type/instance will be passed through as-is.\n\n Note that this only works with statically-defined dataclass types, whereas\n :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`\n dynamically generate dataclass types. 
Additionally, this feature is not\n compatible with a dataclass instance whose type possesses an `InitVar` field.\n\n Examples\n --------\n >>> from hydra_zen.typing import ZenConvert as zc\n >>> zc()\n {}\n >>> zc(dataclass=True)\n {\"dataclass\": True}\n >>> # static type-checker will raise, but runtime will not\n >>> zc(apple=1) # type: ignore\n {\"apple\": 1}\n\n **Configuring dataclass auto-config behaviors**\n\n >>> from hydra_zen import instantiate as I\n >>> from hydra_zen import builds, just\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class B:\n ... x: int\n >>> b = B(x=1)\n\n >>> I(just(b))\n B(x=1)\n >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {\"x\": 1}\n\n >>> I(builds(dict, y=b))\n {'y': B(x=1)}\n >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n\n >>> I(make_config(y=b)) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert=\"all\"))\n {'y': B(x=1)}\n\n Auto-config support does not work with dynamically-generated dataclass types\n\n >>> just(make_config(z=1))\n HydraZenUnsupportedPrimitiveError: ...\n >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))\n {'z': 1}\n\n A dataclass with a `_target_` field will not be converted:\n\n >>> @dataclass\n ... class BuildsStr:\n ... _target_: str = 'builtins.str'\n ...\n >>> BuildsStr is just(BuildsStr)\n True\n >>> (builds_str := BuildsStr()) is just(builds_str)\n True\n \"\"\"\n\n dataclass: bool\n", "path": "src/hydra_zen/typing/_implementations.py" } ]
diff --git a/.github/workflows/tox_run.yml b/.github/workflows/tox_run.yml index fe7d517c6..6d7f016b0 100644 --- a/.github/workflows/tox_run.yml +++ b/.github/workflows/tox_run.yml @@ -91,10 +91,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3.9 uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: 3.9 - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/docs/source/changes.rst b/docs/source/changes.rst index 514ba78bf..4429b8636 100644 --- a/docs/source/changes.rst +++ b/docs/source/changes.rst @@ -11,7 +11,7 @@ chronological order. All previous releases should still be available on pip. .. _v0.9.0: --------------------- -0.9.0rc4 - 2022-11-21 +0.9.0rc5 - 2022-12-09 --------------------- .. note:: This is documentation for an unreleased version of hydra-zen. You can try out this pre-release version using `pip install --pre hydra-zen` @@ -123,6 +123,10 @@ Improvements - :func:`~hydra_zen.hydrated_dataclass` will now produce a pickle-compatible dataclass type. See :pull:`338`. - All documentation code blocks are scanned by pyright as part of our CI process. Several errors in the documentation were fixed. See :pull:`343` and :pull:`344`. +Bug Fixes +--------- +- :pull:`355` fixes an issue where the parameterized generic `hydra_zen.typing.Partial[<...>]` would return `None` for Python versions 3.9+. This prevented this annotation from being used by runtime type checkers. + Compatibility-Breaking Changes ------------------------------ - Previously, any class decorated by :func:`~hydra_zen.hydrated_dataclass` would have a `__module__` attribute set to `typing`. Now the class's `__module__` will reflect the module where its static definition resides. This enables pickle-compatibility (:pull:`338`). This is unlikely to cause any issues for users. diff --git a/pyproject.toml b/pyproject.toml index 99780bc3a..784ab0968 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -203,7 +203,7 @@ commands = pytest -n auto tests/test_docs_typecheck.py -vv [testenv:third-party] install_command = pip install --upgrade --upgrade-strategy eager {opts} {packages} -basepython = python3.8 +basepython = python3.9 deps = {[testenv]deps} torch pytorch-lightning diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py index 330742397..eccfc1540 100644 --- a/src/hydra_zen/typing/_implementations.py +++ b/src/hydra_zen/typing/_implementations.py @@ -108,7 +108,7 @@ def __new__( ) -> Self: ... - if sys.version_info >= (3, 9): # pragma: no cover + if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover def __class_getitem__(cls, item: Any) -> types.GenericAlias: ... 
diff --git a/tests/test_protocols.py b/tests/test_protocols.py index 35db0c2b5..d29b570be 100644 --- a/tests/test_protocols.py +++ b/tests/test_protocols.py @@ -146,3 +146,8 @@ def test_protocol_checkers(x, yes_builds, yes_just, yes_partial): def test_partial_protocol(): assert isinstance(partial(int), Partial) assert not isinstance(print, Partial) + + +def test_parameterized_partial_regression(): + # https://github.com/mit-ll-responsible-ai/hydra-zen/issues/352 + assert Partial[int].__origin__ is Partial # type: ignore diff --git a/tests/test_py39.py b/tests/test_py39.py index b2ad85ebc..ff17efdad 100644 --- a/tests/test_py39.py +++ b/tests/test_py39.py @@ -20,7 +20,8 @@ ) def test_sanitized_type_expected_behavior(in_type, expected_type): # tests collections-as-generics introduced in py39 - assert sanitized_type(in_type) is expected_type + actual = sanitized_type(in_type) + assert actual is expected_type or actual == expected_type @dataclass class Tmp: diff --git a/tests/test_third_party/test_using_beartype.py b/tests/test_third_party/test_using_beartype.py index 94aec6458..c8e2d6d5f 100644 --- a/tests/test_third_party/test_using_beartype.py +++ b/tests/test_third_party/test_using_beartype.py @@ -1,12 +1,21 @@ # Copyright (c) 2022 Massachusetts Institute of Technology # SPDX-License-Identifier: MIT +from functools import partial +from typing import Type + import pytest from beartype.cave import RegexTypes from beartype.vale import Is from typing_extensions import Annotated +from hydra_zen import builds from hydra_zen.third_party.beartype import validates_with_beartype +from hydra_zen.typing import Builds, Partial + + +def func(x: int) -> float: + ... @pytest.mark.parametrize( @@ -14,6 +23,8 @@ [ (RegexTypes, "abc+", 22), (Annotated[str, Is[lambda text: 2 == len(text)]], "hi", "bye"), + (Partial[float], partial(func), func), + (Builds[Type[float]], builds(func), func), ], ) def test_beartype_specific_fields(custom_type, good_val, bad_val):
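For contrast, a hedged sketch of what the guarded definition in the diff restores at runtime: with the stub hidden behind `TYPE_CHECKING`, subscription falls through to the normal `Protocol`/`Generic` machinery and yields a real parameterized alias, which is what the new `test_parameterized_partial_regression` test asserts (`Partial[int].__origin__ is Partial`). `FixedPartial` below is a hypothetical stand-in, not the library class.

```python
# Minimal sketch; "FixedPartial" is a hypothetical stand-in for the patched
# hydra_zen.typing.Partial. The stub is only visible to static type checkers.
from typing import TYPE_CHECKING, Any, Protocol, TypeVar

T = TypeVar("T", covariant=True)


class FixedPartial(Protocol[T]):
    if TYPE_CHECKING:
        def __class_getitem__(cls, item: Any) -> Any:
            ...


alias = FixedPartial[int]
print(alias)             # __main__.FixedPartial[int]
print(alias.__origin__)  # <class '__main__.FixedPartial'>
```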
netbox-community__netbox-6029
Virtual chassis search displays duplicate entries

### NetBox version
v2.10.6

### Python version
3.8

### Steps to Reproduce
1. Create a device named test-vc-1-1
2. Create a device named test-vc-1-2
3. Create a virtual chassis named test-vc-1 and bundle the two devices created in 1. and 2.
4. In the virtual chassis view, perform a search by name using test-vc-1

### Expected Behavior
The list view should display only one entry for the virtual chassis (see the screenshot of the search using the NetBox 2.9 release).

### Observed Behavior
The list view displays the same entry for the virtual chassis twice, once per member (see the screenshot of the search using the NetBox 2.10.6 release).

### Expected behaviour in NetBox 2.9
![Netbox 2.9](https://user-images.githubusercontent.com/4469833/111668228-d558f600-8815-11eb-9e81-def201f1e744.png)

### Observed behaviour in NetBox 2.10
![image](https://user-images.githubusercontent.com/4469833/111668410-033e3a80-8816-11eb-8768-78493606ad7c.png)

### Solution hint
After some code investigation, it seems that the `VirtualChassisFilterSet.search` method in `dcim/filters.py` has something wrong with the [forged queryset](https://github.com/netbox-community/netbox/blob/91fe80f73c12bb4182ee892ca612252e9a30126b/netbox/dcim/filters.py#L1078).

```python
def search(self, queryset, name, value):
    if not value.strip():
        return queryset
    qs_filter = (
        Q(name__icontains=value) |
        Q(members__name__icontains=value) |
        Q(domain__icontains=value)
    )
    return queryset.filter(qs_filter)
```

When the queryset is modified with the following code, the entry is no longer duplicated in the web UI.

```python
def search(self, queryset, name, value):
    if not value.strip():
        return queryset
    qs_filter = (
        Q(name__icontains=value) |
        Q(members__name__icontains=value) |
        Q(domain__icontains=value)
    )
    return queryset.filter(qs_filter).distinct()  # adding distinct() de-duplicates the VC
```
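The duplication is a general consequence of filtering across a to-many relation: the OR against `members__name` joins each virtual chassis to its members, so the query returns one row per matching member, and `.distinct()` collapses them back to one row per chassis. The plain-Python sketch below (no Django involved; the data shapes are invented for illustration) mimics that join.

```python
# Plain-Python analogy (not NetBox/Django code) of why OR-ing across the
# to-many "members" relation duplicates virtual chassis rows.
chassis = {"name": "test-vc-1", "domain": ""}
members = [{"name": "test-vc-1-1"}, {"name": "test-vc-1-2"}]
value = "test-vc-1"

# The SQL join produces one (chassis, member) row per member:
joined_rows = [(chassis, m) for m in members]

# Mirror of Q(name__icontains) | Q(members__name__icontains) | Q(domain__icontains):
matches = [c for c, m in joined_rows
           if value in c["name"] or value in m["name"] or value in c["domain"]]
print(len(matches))  # 2 -- one result row per matching member (the reported bug)

# distinct() keeps one row per chassis identity:
deduped = list({c["name"]: c for c in matches}.values())
print(len(deduped))  # 1 -- the expected single entry
```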
[ { "content": "import django_filters\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\n\nfrom extras.filters import CustomFieldModelFilterSet, LocalConfigContextFilterSet, CreatedUpdatedFilterSet\nfrom tenancy.filters import TenancyFilterSet\nfrom tenancy.models import Tenant\nfrom utilities.choices import ColorChoices\nfrom utilities.filters import (\n BaseFilterSet, MultiValueCharFilter, MultiValueMACAddressFilter, MultiValueNumberFilter,\n NameSlugSearchFilterSet, TagFilter, TreeNodeMultipleChoiceFilter,\n)\nfrom virtualization.models import Cluster\nfrom .choices import *\nfrom .constants import *\nfrom .models import (\n Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,\n DeviceBayTemplate, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,\n InventoryItem, Manufacturer, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort,\n PowerPortTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,\n VirtualChassis,\n)\n\n\n__all__ = (\n 'CableFilterSet',\n 'CableTerminationFilterSet',\n 'ConsoleConnectionFilterSet',\n 'ConsolePortFilterSet',\n 'ConsolePortTemplateFilterSet',\n 'ConsoleServerPortFilterSet',\n 'ConsoleServerPortTemplateFilterSet',\n 'DeviceBayFilterSet',\n 'DeviceBayTemplateFilterSet',\n 'DeviceFilterSet',\n 'DeviceRoleFilterSet',\n 'DeviceTypeFilterSet',\n 'FrontPortFilterSet',\n 'FrontPortTemplateFilterSet',\n 'InterfaceConnectionFilterSet',\n 'InterfaceFilterSet',\n 'InterfaceTemplateFilterSet',\n 'InventoryItemFilterSet',\n 'ManufacturerFilterSet',\n 'PathEndpointFilterSet',\n 'PlatformFilterSet',\n 'PowerConnectionFilterSet',\n 'PowerFeedFilterSet',\n 'PowerOutletFilterSet',\n 'PowerOutletTemplateFilterSet',\n 'PowerPanelFilterSet',\n 'PowerPortFilterSet',\n 'PowerPortTemplateFilterSet',\n 'RackFilterSet',\n 'RackGroupFilterSet',\n 'RackReservationFilterSet',\n 'RackRoleFilterSet',\n 'RearPortFilterSet',\n 'RearPortTemplateFilterSet',\n 'RegionFilterSet',\n 'SiteFilterSet',\n 'VirtualChassisFilterSet',\n)\n\n\nclass RegionFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Region.objects.all(),\n label='Parent region (ID)',\n )\n parent = django_filters.ModelMultipleChoiceFilter(\n field_name='parent__slug',\n queryset=Region.objects.all(),\n to_field_name='slug',\n label='Parent region (slug)',\n )\n\n class Meta:\n model = Region\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass SiteFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=SiteStatusChoices,\n null_value=None\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n tag = TagFilter()\n\n class Meta:\n model = Site\n fields = [\n 'id', 'name', 'slug', 'facility', 'asn', 'latitude', 'longitude', 'contact_name', 'contact_phone',\n 'contact_email',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(facility__icontains=value) |\n 
Q(description__icontains=value) |\n Q(physical_address__icontains=value) |\n Q(shipping_address__icontains=value) |\n Q(contact_name__icontains=value) |\n Q(contact_phone__icontains=value) |\n Q(contact_email__icontains=value) |\n Q(comments__icontains=value)\n )\n try:\n qs_filter |= Q(asn=int(value.strip()))\n except ValueError:\n pass\n return queryset.filter(qs_filter)\n\n\nclass RackGroupFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n label='Rack group (ID)',\n )\n parent = django_filters.ModelMultipleChoiceFilter(\n field_name='parent__slug',\n queryset=RackGroup.objects.all(),\n to_field_name='slug',\n label='Rack group (slug)',\n )\n\n class Meta:\n model = RackGroup\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass RackRoleFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = RackRole\n fields = ['id', 'name', 'slug', 'color']\n\n\nclass RackFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n group = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n to_field_name='slug',\n label='Rack group (slug)',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=RackStatusChoices,\n null_value=None\n )\n type = django_filters.MultipleChoiceFilter(\n choices=RackTypeChoices\n )\n width = django_filters.MultipleChoiceFilter(\n choices=RackWidthChoices\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n queryset=RackRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name='role__slug',\n queryset=RackRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n tag = TagFilter()\n\n class Meta:\n model = Rack\n fields = [\n 'id', 'name', 'facility_id', 'asset_tag', 'u_height', 'desc_units', 'outer_width', 'outer_depth',\n 'outer_unit',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return 
queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(facility_id__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(asset_tag__icontains=value.strip()) |\n Q(comments__icontains=value)\n )\n\n\nclass RackReservationFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='rack__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n group = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n to_field_name='slug',\n label='Rack group (slug)',\n )\n user_id = django_filters.ModelMultipleChoiceFilter(\n queryset=User.objects.all(),\n label='User (ID)',\n )\n user = django_filters.ModelMultipleChoiceFilter(\n field_name='user__username',\n queryset=User.objects.all(),\n to_field_name='username',\n label='User (name)',\n )\n tag = TagFilter()\n\n class Meta:\n model = RackReservation\n fields = ['id', 'created']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(rack__name__icontains=value) |\n Q(rack__facility_id__icontains=value) |\n Q(user__username__icontains=value) |\n Q(description__icontains=value)\n )\n\n\nclass ManufacturerFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = Manufacturer\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass DeviceTypeFilterSet(BaseFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n console_ports = django_filters.BooleanFilter(\n method='_console_ports',\n label='Has console ports',\n )\n console_server_ports = django_filters.BooleanFilter(\n method='_console_server_ports',\n label='Has console server ports',\n )\n power_ports = django_filters.BooleanFilter(\n method='_power_ports',\n label='Has power ports',\n )\n power_outlets = django_filters.BooleanFilter(\n method='_power_outlets',\n label='Has power outlets',\n )\n interfaces = django_filters.BooleanFilter(\n method='_interfaces',\n label='Has interfaces',\n )\n pass_through_ports = django_filters.BooleanFilter(\n method='_pass_through_ports',\n label='Has pass-through ports',\n )\n device_bays = django_filters.BooleanFilter(\n method='_device_bays',\n label='Has device bays',\n )\n tag = TagFilter()\n\n class Meta:\n model = DeviceType\n fields = [\n 'id', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(manufacturer__name__icontains=value) |\n Q(model__icontains=value) 
|\n Q(part_number__icontains=value) |\n Q(comments__icontains=value)\n )\n\n def _console_ports(self, queryset, name, value):\n return queryset.exclude(consoleporttemplates__isnull=value)\n\n def _console_server_ports(self, queryset, name, value):\n return queryset.exclude(consoleserverporttemplates__isnull=value)\n\n def _power_ports(self, queryset, name, value):\n return queryset.exclude(powerporttemplates__isnull=value)\n\n def _power_outlets(self, queryset, name, value):\n return queryset.exclude(poweroutlettemplates__isnull=value)\n\n def _interfaces(self, queryset, name, value):\n return queryset.exclude(interfacetemplates__isnull=value)\n\n def _pass_through_ports(self, queryset, name, value):\n return queryset.exclude(\n frontporttemplates__isnull=value,\n rearporttemplates__isnull=value\n )\n\n def _device_bays(self, queryset, name, value):\n return queryset.exclude(devicebaytemplates__isnull=value)\n\n\nclass DeviceTypeComponentFilterSet(NameSlugSearchFilterSet):\n devicetype_id = django_filters.ModelMultipleChoiceFilter(\n queryset=DeviceType.objects.all(),\n field_name='device_type_id',\n label='Device type (ID)',\n )\n\n\nclass ConsolePortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsolePortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass ConsoleServerPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass PowerPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerPortTemplate\n fields = ['id', 'name', 'type', 'maximum_draw', 'allocated_draw']\n\n\nclass PowerOutletTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerOutletTemplate\n fields = ['id', 'name', 'type', 'feed_leg']\n\n\nclass InterfaceTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = InterfaceTemplate\n fields = ['id', 'name', 'type', 'mgmt_only']\n\n\nclass FrontPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = FrontPortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass RearPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = RearPortTemplate\n fields = ['id', 'name', 'type', 'positions']\n\n\nclass DeviceBayTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = DeviceBayTemplate\n fields = ['id', 'name']\n\n\nclass DeviceRoleFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = DeviceRole\n fields = ['id', 'name', 'slug', 'color', 'vm_role']\n\n\nclass PlatformFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n\n class Meta:\n model = Platform\n fields = ['id', 'name', 'slug', 'napalm_driver', 'description']\n\n\nclass DeviceFilterSet(\n BaseFilterSet,\n TenancyFilterSet,\n LocalConfigContextFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet\n):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__manufacturer',\n 
queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device_role_id',\n queryset=DeviceRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name='device_role__slug',\n queryset=DeviceRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n platform_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Platform.objects.all(),\n label='Platform (ID)',\n )\n platform = django_filters.ModelMultipleChoiceFilter(\n field_name='platform__slug',\n queryset=Platform.objects.all(),\n to_field_name='slug',\n label='Platform (slug)',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n cluster_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Cluster.objects.all(),\n label='VM cluster (ID)',\n )\n model = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__slug',\n queryset=DeviceType.objects.all(),\n to_field_name='slug',\n label='Device model (slug)',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=DeviceStatusChoices,\n null_value=None\n )\n is_full_depth = django_filters.BooleanFilter(\n field_name='device_type__is_full_depth',\n label='Is full depth',\n )\n mac_address = MultiValueMACAddressFilter(\n field_name='interfaces__mac_address',\n label='MAC address',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n has_primary_ip = django_filters.BooleanFilter(\n method='_has_primary_ip',\n label='Has a primary IP',\n )\n virtual_chassis_id = django_filters.ModelMultipleChoiceFilter(\n field_name='virtual_chassis',\n queryset=VirtualChassis.objects.all(),\n label='Virtual chassis (ID)',\n )\n virtual_chassis_member = django_filters.BooleanFilter(\n method='_virtual_chassis_member',\n label='Is a virtual chassis member'\n )\n console_ports = django_filters.BooleanFilter(\n method='_console_ports',\n label='Has console ports',\n )\n console_server_ports = django_filters.BooleanFilter(\n method='_console_server_ports',\n label='Has console server ports',\n )\n power_ports = django_filters.BooleanFilter(\n method='_power_ports',\n label='Has power ports',\n )\n power_outlets = django_filters.BooleanFilter(\n method='_power_outlets',\n label='Has power outlets',\n )\n interfaces = django_filters.BooleanFilter(\n 
method='_interfaces',\n label='Has interfaces',\n )\n pass_through_ports = django_filters.BooleanFilter(\n method='_pass_through_ports',\n label='Has pass-through ports',\n )\n device_bays = django_filters.BooleanFilter(\n method='_device_bays',\n label='Has device bays',\n )\n tag = TagFilter()\n\n class Meta:\n model = Device\n fields = ['id', 'name', 'asset_tag', 'face', 'position', 'vc_position', 'vc_priority']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(inventoryitems__serial__icontains=value.strip()) |\n Q(asset_tag__icontains=value.strip()) |\n Q(comments__icontains=value)\n ).distinct()\n\n def _has_primary_ip(self, queryset, name, value):\n params = Q(primary_ip4__isnull=False) | Q(primary_ip6__isnull=False)\n if value:\n return queryset.filter(params)\n return queryset.exclude(params)\n\n def _virtual_chassis_member(self, queryset, name, value):\n return queryset.exclude(virtual_chassis__isnull=value)\n\n def _console_ports(self, queryset, name, value):\n return queryset.exclude(consoleports__isnull=value)\n\n def _console_server_ports(self, queryset, name, value):\n return queryset.exclude(consoleserverports__isnull=value)\n\n def _power_ports(self, queryset, name, value):\n return queryset.exclude(powerports__isnull=value)\n\n def _power_outlets(self, queryset, name, value):\n return queryset.exclude(poweroutlets__isnull=value)\n\n def _interfaces(self, queryset, name, value):\n return queryset.exclude(interfaces__isnull=value)\n\n def _pass_through_ports(self, queryset, name, value):\n return queryset.exclude(\n frontports__isnull=value,\n rearports__isnull=value\n )\n\n def _device_bays(self, queryset, name, value):\n return queryset.exclude(devicebays__isnull=value)\n\n\nclass DeviceComponentFilterSet(django_filters.FilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n device_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelMultipleChoiceFilter(\n field_name='device__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n tag = TagFilter()\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(label__icontains=value) |\n Q(description__icontains=value)\n )\n\n\nclass CableTerminationFilterSet(django_filters.FilterSet):\n cabled = django_filters.BooleanFilter(\n field_name='cable',\n lookup_expr='isnull',\n exclude=True\n )\n\n\nclass PathEndpointFilterSet(django_filters.FilterSet):\n connected = django_filters.BooleanFilter(\n method='filter_connected'\n )\n\n def filter_connected(self, queryset, name, value):\n if value:\n return 
queryset.filter(_path__is_active=True)\n else:\n return queryset.filter(Q(_path__isnull=True) | Q(_path__is_active=False))\n\n\nclass ConsolePortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=ConsolePortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = ConsolePort\n fields = ['id', 'name', 'description']\n\n\nclass ConsoleServerPortFilterSet(\n BaseFilterSet,\n DeviceComponentFilterSet,\n CableTerminationFilterSet,\n PathEndpointFilterSet\n):\n type = django_filters.MultipleChoiceFilter(\n choices=ConsolePortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = ConsoleServerPort\n fields = ['id', 'name', 'description']\n\n\nclass PowerPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=PowerPortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = PowerPort\n fields = ['id', 'name', 'maximum_draw', 'allocated_draw', 'description']\n\n\nclass PowerOutletFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=PowerOutletTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = PowerOutlet\n fields = ['id', 'name', 'feed_leg', 'description']\n\n\nclass InterfaceFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n # Override device and device_id filters from DeviceComponentFilterSet to match against any peer virtual chassis\n # members\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='name',\n label='Device',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device_id',\n field_name='pk',\n label='Device (ID)',\n )\n kind = django_filters.CharFilter(\n method='filter_kind',\n label='Kind of interface',\n )\n lag_id = django_filters.ModelMultipleChoiceFilter(\n field_name='lag',\n queryset=Interface.objects.all(),\n label='LAG interface (ID)',\n )\n mac_address = MultiValueMACAddressFilter()\n tag = TagFilter()\n vlan_id = django_filters.CharFilter(\n method='filter_vlan_id',\n label='Assigned VLAN'\n )\n vlan = django_filters.CharFilter(\n method='filter_vlan',\n label='Assigned VID'\n )\n type = django_filters.MultipleChoiceFilter(\n choices=InterfaceTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = Interface\n fields = ['id', 'name', 'type', 'enabled', 'mtu', 'mgmt_only', 'mode', 'description']\n\n def filter_device(self, queryset, name, value):\n try:\n devices = Device.objects.filter(**{'{}__in'.format(name): value})\n vc_interface_ids = []\n for device in devices:\n vc_interface_ids.extend(device.vc_interfaces.values_list('id', flat=True))\n return queryset.filter(pk__in=vc_interface_ids)\n except Device.DoesNotExist:\n return queryset.none()\n\n def filter_device_id(self, queryset, name, id_list):\n # Include interfaces belonging to peer virtual chassis members\n vc_interface_ids = []\n try:\n devices = Device.objects.filter(pk__in=id_list)\n for device in devices:\n vc_interface_ids += device.vc_interfaces.values_list('id', flat=True)\n return queryset.filter(pk__in=vc_interface_ids)\n except Device.DoesNotExist:\n return queryset.none()\n\n def filter_vlan_id(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n return queryset.filter(\n 
Q(untagged_vlan_id=value) |\n Q(tagged_vlans=value)\n )\n\n def filter_vlan(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n return queryset.filter(\n Q(untagged_vlan_id__vid=value) |\n Q(tagged_vlans__vid=value)\n )\n\n def filter_kind(self, queryset, name, value):\n value = value.strip().lower()\n return {\n 'physical': queryset.exclude(type__in=NONCONNECTABLE_IFACE_TYPES),\n 'virtual': queryset.filter(type__in=VIRTUAL_IFACE_TYPES),\n 'wireless': queryset.filter(type__in=WIRELESS_IFACE_TYPES),\n }.get(value, queryset.none())\n\n\nclass FrontPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet):\n\n class Meta:\n model = FrontPort\n fields = ['id', 'name', 'type', 'description']\n\n\nclass RearPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet):\n\n class Meta:\n model = RearPort\n fields = ['id', 'name', 'type', 'positions', 'description']\n\n\nclass DeviceBayFilterSet(BaseFilterSet, DeviceComponentFilterSet):\n\n class Meta:\n model = DeviceBay\n fields = ['id', 'name', 'description']\n\n\nclass InventoryItemFilterSet(BaseFilterSet, DeviceComponentFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n device_id = django_filters.ModelChoiceFilter(\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelChoiceFilter(\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=InventoryItem.objects.all(),\n label='Parent inventory item (ID)',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n\n class Meta:\n model = InventoryItem\n fields = ['id', 'name', 'part_id', 'asset_tag', 'discovered']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(part_id__icontains=value) |\n Q(serial__icontains=value) |\n Q(asset_tag__icontains=value) |\n Q(description__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n\nclass VirtualChassisFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n master_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label='Master (ID)',\n )\n master = django_filters.ModelMultipleChoiceFilter(\n field_name='master__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Master (name)',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n 
queryset=Region.objects.all(),\n field_name='master__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='master__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='master__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='master__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n tenant_id = django_filters.ModelMultipleChoiceFilter(\n field_name='master__tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = django_filters.ModelMultipleChoiceFilter(\n field_name='master__tenant__slug',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n tag = TagFilter()\n\n class Meta:\n model = VirtualChassis\n fields = ['id', 'domain', 'name']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(members__name__icontains=value) |\n Q(domain__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n\nclass CableFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n type = django_filters.MultipleChoiceFilter(\n choices=CableTypeChoices\n )\n status = django_filters.MultipleChoiceFilter(\n choices=CableStatusChoices\n )\n color = django_filters.MultipleChoiceFilter(\n choices=ColorChoices\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n rack_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__rack_id'\n )\n rack = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__rack__name'\n )\n site_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__site_id'\n )\n site = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__site__slug'\n )\n tenant_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__tenant_id'\n )\n tenant = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__tenant__slug'\n )\n tag = TagFilter()\n\n class Meta:\n model = Cable\n fields = ['id', 'label', 'length', 'length_unit']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(label__icontains=value)\n\n def filter_device(self, queryset, name, value):\n queryset = queryset.filter(\n Q(**{'_termination_a_{}__in'.format(name): value}) |\n Q(**{'_termination_b_{}__in'.format(name): value})\n )\n return queryset\n\n\nclass ConnectionFilterSet:\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value:\n return queryset\n return queryset.filter(**{f'{name}__in': value})\n\n\nclass ConsoleConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = ConsolePort\n fields = ['name']\n\n\nclass 
PowerConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = PowerPort\n fields = ['name']\n\n\nclass InterfaceConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = Interface\n fields = []\n\n\nclass PowerPanelFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack_group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n tag = TagFilter()\n\n class Meta:\n model = PowerPanel\n fields = ['id', 'name']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n\nclass PowerFeedFilterSet(\n BaseFilterSet,\n CableTerminationFilterSet,\n PathEndpointFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet\n):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='power_panel__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='power_panel__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='power_panel__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='power_panel__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n power_panel_id = django_filters.ModelMultipleChoiceFilter(\n queryset=PowerPanel.objects.all(),\n label='Power panel (ID)',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n tag = TagFilter()\n\n class Meta:\n model = PowerFeed\n fields = ['id', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage', 'max_utilization']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(comments__icontains=value)\n )\n return queryset.filter(qs_filter)\n", "path": "netbox/dcim/filters.py" } ]
[ { "content": "import django_filters\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count\n\nfrom extras.filters import CustomFieldModelFilterSet, LocalConfigContextFilterSet, CreatedUpdatedFilterSet\nfrom tenancy.filters import TenancyFilterSet\nfrom tenancy.models import Tenant\nfrom utilities.choices import ColorChoices\nfrom utilities.filters import (\n BaseFilterSet, MultiValueCharFilter, MultiValueMACAddressFilter, MultiValueNumberFilter,\n NameSlugSearchFilterSet, TagFilter, TreeNodeMultipleChoiceFilter,\n)\nfrom virtualization.models import Cluster\nfrom .choices import *\nfrom .constants import *\nfrom .models import (\n Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,\n DeviceBayTemplate, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,\n InventoryItem, Manufacturer, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort,\n PowerPortTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,\n VirtualChassis,\n)\n\n\n__all__ = (\n 'CableFilterSet',\n 'CableTerminationFilterSet',\n 'ConsoleConnectionFilterSet',\n 'ConsolePortFilterSet',\n 'ConsolePortTemplateFilterSet',\n 'ConsoleServerPortFilterSet',\n 'ConsoleServerPortTemplateFilterSet',\n 'DeviceBayFilterSet',\n 'DeviceBayTemplateFilterSet',\n 'DeviceFilterSet',\n 'DeviceRoleFilterSet',\n 'DeviceTypeFilterSet',\n 'FrontPortFilterSet',\n 'FrontPortTemplateFilterSet',\n 'InterfaceConnectionFilterSet',\n 'InterfaceFilterSet',\n 'InterfaceTemplateFilterSet',\n 'InventoryItemFilterSet',\n 'ManufacturerFilterSet',\n 'PathEndpointFilterSet',\n 'PlatformFilterSet',\n 'PowerConnectionFilterSet',\n 'PowerFeedFilterSet',\n 'PowerOutletFilterSet',\n 'PowerOutletTemplateFilterSet',\n 'PowerPanelFilterSet',\n 'PowerPortFilterSet',\n 'PowerPortTemplateFilterSet',\n 'RackFilterSet',\n 'RackGroupFilterSet',\n 'RackReservationFilterSet',\n 'RackRoleFilterSet',\n 'RearPortFilterSet',\n 'RearPortTemplateFilterSet',\n 'RegionFilterSet',\n 'SiteFilterSet',\n 'VirtualChassisFilterSet',\n)\n\n\nclass RegionFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Region.objects.all(),\n label='Parent region (ID)',\n )\n parent = django_filters.ModelMultipleChoiceFilter(\n field_name='parent__slug',\n queryset=Region.objects.all(),\n to_field_name='slug',\n label='Parent region (slug)',\n )\n\n class Meta:\n model = Region\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass SiteFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=SiteStatusChoices,\n null_value=None\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n tag = TagFilter()\n\n class Meta:\n model = Site\n fields = [\n 'id', 'name', 'slug', 'facility', 'asn', 'latitude', 'longitude', 'contact_name', 'contact_phone',\n 'contact_email',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(facility__icontains=value) |\n 
Q(description__icontains=value) |\n Q(physical_address__icontains=value) |\n Q(shipping_address__icontains=value) |\n Q(contact_name__icontains=value) |\n Q(contact_phone__icontains=value) |\n Q(contact_email__icontains=value) |\n Q(comments__icontains=value)\n )\n try:\n qs_filter |= Q(asn=int(value.strip()))\n except ValueError:\n pass\n return queryset.filter(qs_filter)\n\n\nclass RackGroupFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n label='Rack group (ID)',\n )\n parent = django_filters.ModelMultipleChoiceFilter(\n field_name='parent__slug',\n queryset=RackGroup.objects.all(),\n to_field_name='slug',\n label='Rack group (slug)',\n )\n\n class Meta:\n model = RackGroup\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass RackRoleFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = RackRole\n fields = ['id', 'name', 'slug', 'color']\n\n\nclass RackFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n group = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n to_field_name='slug',\n label='Rack group (slug)',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=RackStatusChoices,\n null_value=None\n )\n type = django_filters.MultipleChoiceFilter(\n choices=RackTypeChoices\n )\n width = django_filters.MultipleChoiceFilter(\n choices=RackWidthChoices\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n queryset=RackRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name='role__slug',\n queryset=RackRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n tag = TagFilter()\n\n class Meta:\n model = Rack\n fields = [\n 'id', 'name', 'facility_id', 'asset_tag', 'u_height', 'desc_units', 'outer_width', 'outer_depth',\n 'outer_unit',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return 
queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(facility_id__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(asset_tag__icontains=value.strip()) |\n Q(comments__icontains=value)\n )\n\n\nclass RackReservationFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='rack__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n group = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n to_field_name='slug',\n label='Rack group (slug)',\n )\n user_id = django_filters.ModelMultipleChoiceFilter(\n queryset=User.objects.all(),\n label='User (ID)',\n )\n user = django_filters.ModelMultipleChoiceFilter(\n field_name='user__username',\n queryset=User.objects.all(),\n to_field_name='username',\n label='User (name)',\n )\n tag = TagFilter()\n\n class Meta:\n model = RackReservation\n fields = ['id', 'created']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(rack__name__icontains=value) |\n Q(rack__facility_id__icontains=value) |\n Q(user__username__icontains=value) |\n Q(description__icontains=value)\n )\n\n\nclass ManufacturerFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = Manufacturer\n fields = ['id', 'name', 'slug', 'description']\n\n\nclass DeviceTypeFilterSet(BaseFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n console_ports = django_filters.BooleanFilter(\n method='_console_ports',\n label='Has console ports',\n )\n console_server_ports = django_filters.BooleanFilter(\n method='_console_server_ports',\n label='Has console server ports',\n )\n power_ports = django_filters.BooleanFilter(\n method='_power_ports',\n label='Has power ports',\n )\n power_outlets = django_filters.BooleanFilter(\n method='_power_outlets',\n label='Has power outlets',\n )\n interfaces = django_filters.BooleanFilter(\n method='_interfaces',\n label='Has interfaces',\n )\n pass_through_ports = django_filters.BooleanFilter(\n method='_pass_through_ports',\n label='Has pass-through ports',\n )\n device_bays = django_filters.BooleanFilter(\n method='_device_bays',\n label='Has device bays',\n )\n tag = TagFilter()\n\n class Meta:\n model = DeviceType\n fields = [\n 'id', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(manufacturer__name__icontains=value) |\n Q(model__icontains=value) 
|\n Q(part_number__icontains=value) |\n Q(comments__icontains=value)\n )\n\n def _console_ports(self, queryset, name, value):\n return queryset.exclude(consoleporttemplates__isnull=value)\n\n def _console_server_ports(self, queryset, name, value):\n return queryset.exclude(consoleserverporttemplates__isnull=value)\n\n def _power_ports(self, queryset, name, value):\n return queryset.exclude(powerporttemplates__isnull=value)\n\n def _power_outlets(self, queryset, name, value):\n return queryset.exclude(poweroutlettemplates__isnull=value)\n\n def _interfaces(self, queryset, name, value):\n return queryset.exclude(interfacetemplates__isnull=value)\n\n def _pass_through_ports(self, queryset, name, value):\n return queryset.exclude(\n frontporttemplates__isnull=value,\n rearporttemplates__isnull=value\n )\n\n def _device_bays(self, queryset, name, value):\n return queryset.exclude(devicebaytemplates__isnull=value)\n\n\nclass DeviceTypeComponentFilterSet(NameSlugSearchFilterSet):\n devicetype_id = django_filters.ModelMultipleChoiceFilter(\n queryset=DeviceType.objects.all(),\n field_name='device_type_id',\n label='Device type (ID)',\n )\n\n\nclass ConsolePortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsolePortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass ConsoleServerPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass PowerPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerPortTemplate\n fields = ['id', 'name', 'type', 'maximum_draw', 'allocated_draw']\n\n\nclass PowerOutletTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerOutletTemplate\n fields = ['id', 'name', 'type', 'feed_leg']\n\n\nclass InterfaceTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = InterfaceTemplate\n fields = ['id', 'name', 'type', 'mgmt_only']\n\n\nclass FrontPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = FrontPortTemplate\n fields = ['id', 'name', 'type']\n\n\nclass RearPortTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = RearPortTemplate\n fields = ['id', 'name', 'type', 'positions']\n\n\nclass DeviceBayTemplateFilterSet(BaseFilterSet, DeviceTypeComponentFilterSet):\n\n class Meta:\n model = DeviceBayTemplate\n fields = ['id', 'name']\n\n\nclass DeviceRoleFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n\n class Meta:\n model = DeviceRole\n fields = ['id', 'name', 'slug', 'color', 'vm_role']\n\n\nclass PlatformFilterSet(BaseFilterSet, NameSlugSearchFilterSet):\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n\n class Meta:\n model = Platform\n fields = ['id', 'name', 'slug', 'napalm_driver', 'description']\n\n\nclass DeviceFilterSet(\n BaseFilterSet,\n TenancyFilterSet,\n LocalConfigContextFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet\n):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__manufacturer',\n 
queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device_role_id',\n queryset=DeviceRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name='device_role__slug',\n queryset=DeviceRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n platform_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Platform.objects.all(),\n label='Platform (ID)',\n )\n platform = django_filters.ModelMultipleChoiceFilter(\n field_name='platform__slug',\n queryset=Platform.objects.all(),\n to_field_name='slug',\n label='Platform (slug)',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack__group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n cluster_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Cluster.objects.all(),\n label='VM cluster (ID)',\n )\n model = django_filters.ModelMultipleChoiceFilter(\n field_name='device_type__slug',\n queryset=DeviceType.objects.all(),\n to_field_name='slug',\n label='Device model (slug)',\n )\n status = django_filters.MultipleChoiceFilter(\n choices=DeviceStatusChoices,\n null_value=None\n )\n is_full_depth = django_filters.BooleanFilter(\n field_name='device_type__is_full_depth',\n label='Is full depth',\n )\n mac_address = MultiValueMACAddressFilter(\n field_name='interfaces__mac_address',\n label='MAC address',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n has_primary_ip = django_filters.BooleanFilter(\n method='_has_primary_ip',\n label='Has a primary IP',\n )\n virtual_chassis_id = django_filters.ModelMultipleChoiceFilter(\n field_name='virtual_chassis',\n queryset=VirtualChassis.objects.all(),\n label='Virtual chassis (ID)',\n )\n virtual_chassis_member = django_filters.BooleanFilter(\n method='_virtual_chassis_member',\n label='Is a virtual chassis member'\n )\n console_ports = django_filters.BooleanFilter(\n method='_console_ports',\n label='Has console ports',\n )\n console_server_ports = django_filters.BooleanFilter(\n method='_console_server_ports',\n label='Has console server ports',\n )\n power_ports = django_filters.BooleanFilter(\n method='_power_ports',\n label='Has power ports',\n )\n power_outlets = django_filters.BooleanFilter(\n method='_power_outlets',\n label='Has power outlets',\n )\n interfaces = django_filters.BooleanFilter(\n 
method='_interfaces',\n label='Has interfaces',\n )\n pass_through_ports = django_filters.BooleanFilter(\n method='_pass_through_ports',\n label='Has pass-through ports',\n )\n device_bays = django_filters.BooleanFilter(\n method='_device_bays',\n label='Has device bays',\n )\n tag = TagFilter()\n\n class Meta:\n model = Device\n fields = ['id', 'name', 'asset_tag', 'face', 'position', 'vc_position', 'vc_priority']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(inventoryitems__serial__icontains=value.strip()) |\n Q(asset_tag__icontains=value.strip()) |\n Q(comments__icontains=value)\n ).distinct()\n\n def _has_primary_ip(self, queryset, name, value):\n params = Q(primary_ip4__isnull=False) | Q(primary_ip6__isnull=False)\n if value:\n return queryset.filter(params)\n return queryset.exclude(params)\n\n def _virtual_chassis_member(self, queryset, name, value):\n return queryset.exclude(virtual_chassis__isnull=value)\n\n def _console_ports(self, queryset, name, value):\n return queryset.exclude(consoleports__isnull=value)\n\n def _console_server_ports(self, queryset, name, value):\n return queryset.exclude(consoleserverports__isnull=value)\n\n def _power_ports(self, queryset, name, value):\n return queryset.exclude(powerports__isnull=value)\n\n def _power_outlets(self, queryset, name, value):\n return queryset.exclude(poweroutlets__isnull=value)\n\n def _interfaces(self, queryset, name, value):\n return queryset.exclude(interfaces__isnull=value)\n\n def _pass_through_ports(self, queryset, name, value):\n return queryset.exclude(\n frontports__isnull=value,\n rearports__isnull=value\n )\n\n def _device_bays(self, queryset, name, value):\n return queryset.exclude(devicebays__isnull=value)\n\n\nclass DeviceComponentFilterSet(django_filters.FilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n device_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelMultipleChoiceFilter(\n field_name='device__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n tag = TagFilter()\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(label__icontains=value) |\n Q(description__icontains=value)\n )\n\n\nclass CableTerminationFilterSet(django_filters.FilterSet):\n cabled = django_filters.BooleanFilter(\n field_name='cable',\n lookup_expr='isnull',\n exclude=True\n )\n\n\nclass PathEndpointFilterSet(django_filters.FilterSet):\n connected = django_filters.BooleanFilter(\n method='filter_connected'\n )\n\n def filter_connected(self, queryset, name, value):\n if value:\n return 
queryset.filter(_path__is_active=True)\n else:\n return queryset.filter(Q(_path__isnull=True) | Q(_path__is_active=False))\n\n\nclass ConsolePortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=ConsolePortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = ConsolePort\n fields = ['id', 'name', 'description']\n\n\nclass ConsoleServerPortFilterSet(\n BaseFilterSet,\n DeviceComponentFilterSet,\n CableTerminationFilterSet,\n PathEndpointFilterSet\n):\n type = django_filters.MultipleChoiceFilter(\n choices=ConsolePortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = ConsoleServerPort\n fields = ['id', 'name', 'description']\n\n\nclass PowerPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=PowerPortTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = PowerPort\n fields = ['id', 'name', 'maximum_draw', 'allocated_draw', 'description']\n\n\nclass PowerOutletFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n type = django_filters.MultipleChoiceFilter(\n choices=PowerOutletTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = PowerOutlet\n fields = ['id', 'name', 'feed_leg', 'description']\n\n\nclass InterfaceFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet, PathEndpointFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n # Override device and device_id filters from DeviceComponentFilterSet to match against any peer virtual chassis\n # members\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='name',\n label='Device',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device_id',\n field_name='pk',\n label='Device (ID)',\n )\n kind = django_filters.CharFilter(\n method='filter_kind',\n label='Kind of interface',\n )\n lag_id = django_filters.ModelMultipleChoiceFilter(\n field_name='lag',\n queryset=Interface.objects.all(),\n label='LAG interface (ID)',\n )\n mac_address = MultiValueMACAddressFilter()\n tag = TagFilter()\n vlan_id = django_filters.CharFilter(\n method='filter_vlan_id',\n label='Assigned VLAN'\n )\n vlan = django_filters.CharFilter(\n method='filter_vlan',\n label='Assigned VID'\n )\n type = django_filters.MultipleChoiceFilter(\n choices=InterfaceTypeChoices,\n null_value=None\n )\n\n class Meta:\n model = Interface\n fields = ['id', 'name', 'type', 'enabled', 'mtu', 'mgmt_only', 'mode', 'description']\n\n def filter_device(self, queryset, name, value):\n try:\n devices = Device.objects.filter(**{'{}__in'.format(name): value})\n vc_interface_ids = []\n for device in devices:\n vc_interface_ids.extend(device.vc_interfaces.values_list('id', flat=True))\n return queryset.filter(pk__in=vc_interface_ids)\n except Device.DoesNotExist:\n return queryset.none()\n\n def filter_device_id(self, queryset, name, id_list):\n # Include interfaces belonging to peer virtual chassis members\n vc_interface_ids = []\n try:\n devices = Device.objects.filter(pk__in=id_list)\n for device in devices:\n vc_interface_ids += device.vc_interfaces.values_list('id', flat=True)\n return queryset.filter(pk__in=vc_interface_ids)\n except Device.DoesNotExist:\n return queryset.none()\n\n def filter_vlan_id(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n return queryset.filter(\n 
Q(untagged_vlan_id=value) |\n Q(tagged_vlans=value)\n )\n\n def filter_vlan(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n return queryset.filter(\n Q(untagged_vlan_id__vid=value) |\n Q(tagged_vlans__vid=value)\n )\n\n def filter_kind(self, queryset, name, value):\n value = value.strip().lower()\n return {\n 'physical': queryset.exclude(type__in=NONCONNECTABLE_IFACE_TYPES),\n 'virtual': queryset.filter(type__in=VIRTUAL_IFACE_TYPES),\n 'wireless': queryset.filter(type__in=WIRELESS_IFACE_TYPES),\n }.get(value, queryset.none())\n\n\nclass FrontPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet):\n\n class Meta:\n model = FrontPort\n fields = ['id', 'name', 'type', 'description']\n\n\nclass RearPortFilterSet(BaseFilterSet, DeviceComponentFilterSet, CableTerminationFilterSet):\n\n class Meta:\n model = RearPort\n fields = ['id', 'name', 'type', 'positions', 'description']\n\n\nclass DeviceBayFilterSet(BaseFilterSet, DeviceComponentFilterSet):\n\n class Meta:\n model = DeviceBay\n fields = ['id', 'name', 'description']\n\n\nclass InventoryItemFilterSet(BaseFilterSet, DeviceComponentFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='device__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='device__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n device_id = django_filters.ModelChoiceFilter(\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelChoiceFilter(\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n parent_id = django_filters.ModelMultipleChoiceFilter(\n queryset=InventoryItem.objects.all(),\n label='Parent inventory item (ID)',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n field_name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n serial = django_filters.CharFilter(\n lookup_expr='iexact'\n )\n\n class Meta:\n model = InventoryItem\n fields = ['id', 'name', 'part_id', 'asset_tag', 'discovered']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(part_id__icontains=value) |\n Q(serial__icontains=value) |\n Q(asset_tag__icontains=value) |\n Q(description__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n\nclass VirtualChassisFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n master_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label='Master (ID)',\n )\n master = django_filters.ModelMultipleChoiceFilter(\n field_name='master__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Master (name)',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n 
queryset=Region.objects.all(),\n field_name='master__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='master__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='master__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='master__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n tenant_id = django_filters.ModelMultipleChoiceFilter(\n field_name='master__tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = django_filters.ModelMultipleChoiceFilter(\n field_name='master__tenant__slug',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n tag = TagFilter()\n\n class Meta:\n model = VirtualChassis\n fields = ['id', 'domain', 'name']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(members__name__icontains=value) |\n Q(domain__icontains=value)\n )\n return queryset.filter(qs_filter).distinct()\n\n\nclass CableFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n type = django_filters.MultipleChoiceFilter(\n choices=CableTypeChoices\n )\n status = django_filters.MultipleChoiceFilter(\n choices=CableStatusChoices\n )\n color = django_filters.MultipleChoiceFilter(\n choices=ColorChoices\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n rack_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__rack_id'\n )\n rack = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__rack__name'\n )\n site_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__site_id'\n )\n site = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__site__slug'\n )\n tenant_id = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__tenant_id'\n )\n tenant = MultiValueNumberFilter(\n method='filter_device',\n field_name='device__tenant__slug'\n )\n tag = TagFilter()\n\n class Meta:\n model = Cable\n fields = ['id', 'label', 'length', 'length_unit']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(label__icontains=value)\n\n def filter_device(self, queryset, name, value):\n queryset = queryset.filter(\n Q(**{'_termination_a_{}__in'.format(name): value}) |\n Q(**{'_termination_b_{}__in'.format(name): value})\n )\n return queryset\n\n\nclass ConnectionFilterSet:\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value:\n return queryset\n return queryset.filter(**{f'{name}__in': value})\n\n\nclass ConsoleConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = ConsolePort\n fields = ['name']\n\n\nclass 
PowerConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = PowerPort\n fields = ['name']\n\n\nclass InterfaceConnectionFilterSet(ConnectionFilterSet, BaseFilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device_id = MultiValueNumberFilter(\n method='filter_device'\n )\n device = MultiValueCharFilter(\n method='filter_device',\n field_name='device__name'\n )\n\n class Meta:\n model = Interface\n fields = []\n\n\nclass PowerPanelFilterSet(BaseFilterSet):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = TreeNodeMultipleChoiceFilter(\n queryset=RackGroup.objects.all(),\n field_name='rack_group',\n lookup_expr='in',\n label='Rack group (ID)',\n )\n tag = TagFilter()\n\n class Meta:\n model = PowerPanel\n fields = ['id', 'name']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value)\n )\n return queryset.filter(qs_filter)\n\n\nclass PowerFeedFilterSet(\n BaseFilterSet,\n CableTerminationFilterSet,\n PathEndpointFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet\n):\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='power_panel__site__region',\n lookup_expr='in',\n label='Region (ID)',\n )\n region = TreeNodeMultipleChoiceFilter(\n queryset=Region.objects.all(),\n field_name='power_panel__site__region',\n lookup_expr='in',\n to_field_name='slug',\n label='Region (slug)',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name='power_panel__site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name='power_panel__site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n power_panel_id = django_filters.ModelMultipleChoiceFilter(\n queryset=PowerPanel.objects.all(),\n label='Power panel (ID)',\n )\n rack_id = django_filters.ModelMultipleChoiceFilter(\n field_name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n tag = TagFilter()\n\n class Meta:\n model = PowerFeed\n fields = ['id', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage', 'max_utilization']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(comments__icontains=value)\n )\n return queryset.filter(qs_filter)\n", "path": "netbox/dcim/filters.py" } ]
diff --git a/netbox/dcim/filters.py b/netbox/dcim/filters.py
index 41363c26119..548f401c04f 100644
--- a/netbox/dcim/filters.py
+++ b/netbox/dcim/filters.py
@@ -1075,7 +1075,7 @@ def search(self, queryset, name, value):
             Q(members__name__icontains=value) |
             Q(domain__icontains=value)
         )
-        return queryset.filter(qs_filter)
+        return queryset.filter(qs_filter).distinct()
 
 
 class CableFilterSet(BaseFilterSet):
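Note on the patch above: the search `Q` object spans the reverse `members` relation (device → virtual chassis), so the underlying SQL join can return the same `VirtualChassis` once per matching member device; appending `.distinct()` collapses those duplicates. A rough illustration follows — the chassis/device names are hypothetical and it assumes a configured NetBox/Django shell, so it is a sketch rather than a standalone script:

```python
# Hypothetical data: one VirtualChassis named "vc1" whose two member
# devices are named "vc1-sw1" and "vc1-sw2".
from django.db.models import Q

from dcim.models import VirtualChassis

qs = VirtualChassis.objects.filter(
    Q(name__icontains="vc1") | Q(members__name__icontains="vc1")
)

# Filtering across the reverse "members" relation joins one row per
# matching member device, so the single chassis shows up more than once.
print(qs.count())             # 2 with the data above
print(qs.distinct().count())  # 1 -- what the patched search() now returns
```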
unionai-oss__pandera-909
Implicit ignore_na=True behaviour causes custom dataframe checks to be ignored by default
**Describe the bug**
When using custom dataframe checks returning boolean series, `SchemaModel.validate()` does not report validation errors generated by a custom check if the record in question (or is it the entire dataframe?) contains null values.

This behaviour is the default and can be disabled by setting `ignore_na` to `False` (either via `@pa.dataframe_check(ignore_na=False)` or a Config entry such as `check_name = {"ignore_na": False}`).

This default behaviour is surprising, and the root cause of my issue was discovered only after a lengthy debugging process with much help from @cosmicBboy. Perhaps this default behaviour could be changed or at least better documented, as Niels mentions that this has come up more than once previously.

- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [x] (optional) I have confirmed this bug exists on the master branch of pandera.

#### Code Sample, a copy-pastable example

CSV (DataFrame):
```
field0,field1,field2
,foo,foo
```

```python
@pa.dataframe_check
def field1_does_not_equal_field2(cls, df: pd.DataFrame) -> Series[bool]:
    return df["field1"] != df["field2"]  # Even though field1 == field2, validation passes because field0 is null
```

#### Expected behavior
Create a custom Pandera dataframe check returning a boolean series that accepts a CSV containing null values.
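As a point of reference, here is a minimal sketch of the workaround the report mentions — passing `ignore_na=False` so that rows containing nulls are still evaluated by the dataframe-level check. The schema below is an assumption built around the issue's CSV; the column names and dtypes are not taken from the pandera code base:

```python
import pandas as pd
import pandera as pa
from pandera.typing import Series


class Schema(pa.SchemaModel):
    field0: Series[str] = pa.Field(nullable=True)
    field1: Series[str]
    field2: Series[str]

    @pa.dataframe_check(ignore_na=False)
    def field1_does_not_equal_field2(cls, df: pd.DataFrame) -> Series[bool]:
        return df["field1"] != df["field2"]


# Same data as the CSV above: field0 is null while field1 == field2.
df = pd.DataFrame({"field0": [None], "field1": ["foo"], "field2": ["foo"]})

# With ignore_na=False the failure is reported instead of being masked
# by the null in field0, so this raises a SchemaError.
Schema.validate(df)
```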
[ { "content": "\"\"\"Utility functions for validation.\"\"\"\n\nfrom functools import lru_cache\nfrom typing import NamedTuple, Optional, Tuple, Union\n\nimport pandas as pd\n\nSupportedTypes = NamedTuple(\n \"SupportedTypes\",\n (\n (\"table_types\", Tuple[type, ...]),\n (\"field_types\", Tuple[type, ...]),\n (\"index_types\", Tuple[type, ...]),\n (\"multiindex_types\", Tuple[type, ...]),\n ),\n)\n\n\n@lru_cache(maxsize=None)\ndef _supported_types():\n # pylint: disable=import-outside-toplevel\n table_types = [pd.DataFrame]\n field_types = [pd.Series]\n index_types = [pd.Index]\n multiindex_types = [pd.MultiIndex]\n\n try:\n import pyspark.pandas as ps\n\n table_types.append(ps.DataFrame)\n field_types.append(ps.Series)\n index_types.append(ps.Index)\n multiindex_types.append(ps.MultiIndex)\n except ImportError:\n pass\n try: # pragma: no cover\n import modin.pandas as mpd\n\n table_types.append(mpd.DataFrame)\n field_types.append(mpd.Series)\n index_types.append(mpd.Index)\n multiindex_types.append(mpd.MultiIndex)\n except ImportError:\n pass\n try:\n import dask.dataframe as dd\n\n table_types.append(dd.DataFrame)\n field_types.append(dd.Series)\n index_types.append(dd.Index)\n except ImportError:\n pass\n\n return SupportedTypes(\n tuple(table_types),\n tuple(field_types),\n tuple(index_types),\n tuple(multiindex_types),\n )\n\n\ndef is_table(obj):\n \"\"\"Verifies whether an object is table-like.\n\n Where a table is a 2-dimensional data matrix of rows and columns, which\n can be indexed in multiple different ways.\n \"\"\"\n return isinstance(obj, _supported_types().table_types)\n\n\ndef is_field(obj):\n \"\"\"Verifies whether an object is field-like.\n\n Where a field is a columnar representation of data in a table-like\n data structure.\n \"\"\"\n return isinstance(obj, _supported_types().field_types)\n\n\ndef is_index(obj):\n \"\"\"Verifies whether an object is a table index.\"\"\"\n return isinstance(obj, _supported_types().index_types)\n\n\ndef is_multiindex(obj):\n \"\"\"Verifies whether an object is a multi-level table index.\"\"\"\n return isinstance(obj, _supported_types().multiindex_types)\n\n\ndef is_supported_check_obj(obj):\n \"\"\"Verifies whether an object is table- or field-like.\"\"\"\n return is_table(obj) or is_field(obj)\n\n\ndef prepare_series_check_output(\n check_obj: Union[pd.Series, pd.DataFrame],\n check_output: pd.Series,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Prepare the check output and failure cases for a Series check output.\n\n check_obj can be a dataframe, since a check function can potentially return\n a Series resulting from applying some check function that outputs a Series.\n \"\"\"\n if ignore_na:\n isna = (\n check_obj.isna().any(axis=\"columns\")\n if isinstance(check_obj, pd.DataFrame)\n else check_obj.isna()\n )\n try:\n check_output = check_output | isna\n except AttributeError:\n # convert check_output to numpy for modin compatibility\n check_output = check_output.to_numpy() | isna\n\n failure_cases = check_obj[~check_output]\n if not failure_cases.empty and n_failure_cases is not None:\n # NOTE: this is a hack to support pyspark.pandas and modin, since you\n # can't use groupby on a dataframe with another dataframe\n if type(failure_cases).__module__.startswith(\"pyspark.pandas\") or type(\n failure_cases\n ).__module__.startswith(\"modin.pandas\"):\n failure_cases = (\n failure_cases.rename(\"failure_cases\")\n .to_frame()\n .assign(check_output=check_output)\n 
.groupby(\"check_output\")\n .head(n_failure_cases)[\"failure_cases\"]\n )\n else:\n failure_cases = failure_cases.groupby(check_output).head(\n n_failure_cases\n )\n return check_output, failure_cases\n\n\ndef prepare_dataframe_check_output(\n check_obj: pd.DataFrame,\n check_output: pd.DataFrame,\n df_orig: Optional[pd.DataFrame] = None,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Unstack a dataframe of boolean values.\n\n Check results consisting of a boolean dataframe should be reported at the\n most granular level.\n \"\"\"\n if df_orig is not None:\n assert df_orig.shape == check_output.shape\n\n if df_orig is None:\n df_orig = check_obj\n check_output = check_output.unstack()\n if ignore_na:\n check_output = check_output | df_orig.unstack().isna()\n failure_cases = (\n check_obj.unstack()[~check_output]\n .rename(\"failure_case\")\n .rename_axis([\"column\", \"index\"])\n .reset_index()\n )\n if not failure_cases.empty and n_failure_cases is not None:\n failure_cases = failure_cases.drop_duplicates().head(n_failure_cases)\n return check_output, failure_cases\n", "path": "pandera/check_utils.py" } ]
[ { "content": "\"\"\"Utility functions for validation.\"\"\"\n\nfrom functools import lru_cache\nfrom typing import NamedTuple, Optional, Tuple, Union\n\nimport pandas as pd\n\nSupportedTypes = NamedTuple(\n \"SupportedTypes\",\n (\n (\"table_types\", Tuple[type, ...]),\n (\"field_types\", Tuple[type, ...]),\n (\"index_types\", Tuple[type, ...]),\n (\"multiindex_types\", Tuple[type, ...]),\n ),\n)\n\n\n@lru_cache(maxsize=None)\ndef _supported_types():\n # pylint: disable=import-outside-toplevel\n table_types = [pd.DataFrame]\n field_types = [pd.Series]\n index_types = [pd.Index]\n multiindex_types = [pd.MultiIndex]\n\n try:\n import pyspark.pandas as ps\n\n table_types.append(ps.DataFrame)\n field_types.append(ps.Series)\n index_types.append(ps.Index)\n multiindex_types.append(ps.MultiIndex)\n except ImportError:\n pass\n try: # pragma: no cover\n import modin.pandas as mpd\n\n table_types.append(mpd.DataFrame)\n field_types.append(mpd.Series)\n index_types.append(mpd.Index)\n multiindex_types.append(mpd.MultiIndex)\n except ImportError:\n pass\n try:\n import dask.dataframe as dd\n\n table_types.append(dd.DataFrame)\n field_types.append(dd.Series)\n index_types.append(dd.Index)\n except ImportError:\n pass\n\n return SupportedTypes(\n tuple(table_types),\n tuple(field_types),\n tuple(index_types),\n tuple(multiindex_types),\n )\n\n\ndef is_table(obj):\n \"\"\"Verifies whether an object is table-like.\n\n Where a table is a 2-dimensional data matrix of rows and columns, which\n can be indexed in multiple different ways.\n \"\"\"\n return isinstance(obj, _supported_types().table_types)\n\n\ndef is_field(obj):\n \"\"\"Verifies whether an object is field-like.\n\n Where a field is a columnar representation of data in a table-like\n data structure.\n \"\"\"\n return isinstance(obj, _supported_types().field_types)\n\n\ndef is_index(obj):\n \"\"\"Verifies whether an object is a table index.\"\"\"\n return isinstance(obj, _supported_types().index_types)\n\n\ndef is_multiindex(obj):\n \"\"\"Verifies whether an object is a multi-level table index.\"\"\"\n return isinstance(obj, _supported_types().multiindex_types)\n\n\ndef is_supported_check_obj(obj):\n \"\"\"Verifies whether an object is table- or field-like.\"\"\"\n return is_table(obj) or is_field(obj)\n\n\ndef prepare_series_check_output(\n check_obj: Union[pd.Series, pd.DataFrame],\n check_output: pd.Series,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Prepare the check output and failure cases for a Series check output.\n\n check_obj can be a dataframe, since a check function can potentially return\n a Series resulting from applying some check function that outputs a Series.\n \"\"\"\n if ignore_na:\n isna = (\n check_obj.isna().all(axis=\"columns\")\n if isinstance(check_obj, pd.DataFrame)\n else check_obj.isna()\n )\n try:\n check_output = check_output | isna\n except AttributeError:\n # convert check_output to numpy for modin compatibility\n check_output = check_output.to_numpy() | isna\n\n failure_cases = check_obj[~check_output]\n if not failure_cases.empty and n_failure_cases is not None:\n # NOTE: this is a hack to support pyspark.pandas and modin, since you\n # can't use groupby on a dataframe with another dataframe\n if type(failure_cases).__module__.startswith(\"pyspark.pandas\") or type(\n failure_cases\n ).__module__.startswith(\"modin.pandas\"):\n failure_cases = (\n failure_cases.rename(\"failure_cases\")\n .to_frame()\n .assign(check_output=check_output)\n 
.groupby(\"check_output\")\n .head(n_failure_cases)[\"failure_cases\"]\n )\n else:\n failure_cases = failure_cases.groupby(check_output).head(\n n_failure_cases\n )\n return check_output, failure_cases\n\n\ndef prepare_dataframe_check_output(\n check_obj: pd.DataFrame,\n check_output: pd.DataFrame,\n df_orig: Optional[pd.DataFrame] = None,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Unstack a dataframe of boolean values.\n\n Check results consisting of a boolean dataframe should be reported at the\n most granular level.\n \"\"\"\n if df_orig is not None:\n assert df_orig.shape == check_output.shape\n\n if df_orig is None:\n df_orig = check_obj\n check_output = check_output.unstack()\n if ignore_na:\n check_output = check_output | df_orig.unstack().isna()\n failure_cases = (\n check_obj.unstack()[~check_output]\n .rename(\"failure_case\")\n .rename_axis([\"column\", \"index\"])\n .reset_index()\n )\n if not failure_cases.empty and n_failure_cases is not None:\n failure_cases = failure_cases.drop_duplicates().head(n_failure_cases)\n return check_output, failure_cases\n", "path": "pandera/check_utils.py" } ]
diff --git a/pandera/check_utils.py b/pandera/check_utils.py
index f6bc56b8b..eca859db0 100644
--- a/pandera/check_utils.py
+++ b/pandera/check_utils.py
@@ -105,7 +105,7 @@ def prepare_series_check_output(
     """
     if ignore_na:
         isna = (
-            check_obj.isna().any(axis="columns")
+            check_obj.isna().all(axis="columns")
             if isinstance(check_obj, pd.DataFrame)
             else check_obj.isna()
         )
diff --git a/tests/core/test_checks.py b/tests/core/test_checks.py
index d3d6cbcf1..587c3354f 100644
--- a/tests/core/test_checks.py
+++ b/tests/core/test_checks.py
@@ -458,3 +458,19 @@ def test_dataframe_check_schema_error() -> None:
             "index == 3 & column == 'b'"
         ).failure_case.iloc[0]
     )
+
+
+def test_prepare_series_check_output_df_level():
+    """Test that dataframe-level checks only ignore rows where all values are null."""
+    df = pd.DataFrame(
+        {
+            "a": [1, 1, 2, 2, 3, 3, None],
+            "b": [2, 1, 4, 3, 6, 5, None],
+            "c": [None] * 7,
+        }
+    )
+    check = Check(lambda df: df["b"] == df["a"] * 2, ignore_na=True)
+    # The last record should evaluate to True, since all values are null
+    expected_output = [True, False, True, False, True, False, True]
+    result = check(df)
+    assert result.check_output.tolist() == expected_output
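The fix above swaps `.any(axis="columns")` for `.all(axis="columns")`: a row is now exempted from a dataframe-level check only when every value in it is null, rather than when any value is null. A quick illustration of the two masks, reusing the column names from the issue's CSV:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "field0": [None, None],
        "field1": ["foo", None],
        "field2": ["foo", None],
    }
)

# Old behaviour: a single null anywhere in the row masked the whole row,
# so both rows were skipped by the check.
print(df.isna().any(axis="columns").tolist())  # [True, True]

# Patched behaviour: only the fully-null second row is skipped.
print(df.isna().all(axis="columns").tolist())  # [False, True]
```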
openfun__marsha-1250
Allow instructors to publish their video publicly
## Feature Request

**Is your feature request related to a problem or unsupported use case? Please describe.**
We want instructors to be able to publish their video publicly. At the moment, the flag is only accessible via the Django admin interface.

**Describe the solution you'd like**
Expose the "is_public" field to the video API endpoint, allowing only instructors to update its value.
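The merged change for this issue is not included in this (truncated) record, so the fragment below is only a sketch of the requested API change: add `is_public` to the fields exposed by the `VideoSerializer` that appears in the pre-patch file content further down. How writes are limited to instructor/administrator tokens would live in the API permission layer, which is not shown here; every name other than `is_public` comes from that file.

```python
# Sketch only -- not the patch that was merged for this issue.
class VideoSerializer(VideoBaseSerializer):
    """Serializer to display a video model with all its resolution options."""

    class Meta:  # noqa
        model = Video
        fields = (
            # ... the fields already listed in the pre-patch serializer ...
            "is_public",  # newly exposed flag instructors may toggle
        )
```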
[ { "content": "\"\"\"Structure of Video related models API responses with Django Rest Framework serializers.\"\"\"\nfrom datetime import timedelta\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom botocore.signers import CloudFrontSigner\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.models import TokenUser\n\nfrom ..defaults import IDLE, JITSI, LIVE_CHOICES, LIVE_TYPE_CHOICES, RUNNING, STOPPED\nfrom ..models import (\n LivePairing,\n LiveRegistration,\n Playlist,\n Thumbnail,\n TimedTextTrack,\n Video,\n)\nfrom ..models.account import ADMINISTRATOR, INSTRUCTOR, LTI_ROLES\nfrom ..utils import cloudfront_utils, time_utils, xmpp_utils\nfrom ..utils.url_utils import build_absolute_uri_behind_proxy\nfrom .base import TimestampField\nfrom .playlist import PlaylistLiteSerializer\n\n\nclass TimedTextTrackSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to display a timed text track model.\"\"\"\n\n class Meta: # noqa\n model = TimedTextTrack\n fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"mode\",\n \"language\",\n \"upload_state\",\n \"url\",\n \"source_url\",\n \"video\",\n )\n read_only_fields = (\n \"id\",\n \"active_stamp\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"url\",\n \"video\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n url = serializers.SerializerMethodField()\n source_url = serializers.SerializerMethodField()\n # Make sure video UUID is converted to a string during serialization\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n is_ready_to_show = serializers.BooleanField(read_only=True)\n\n def create(self, validated_data):\n \"\"\"Force the video field to the video of the JWT Token if any.\n\n Parameters\n ----------\n validated_data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The \"validated_data\" dictionary is returned after modification.\n\n \"\"\"\n # user here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n # Set the video field from the payload if there is one and the user is identified\n # as a proper user object through access rights\n if (\n self.initial_data.get(\"video\")\n and user.token.get(\"user\")\n and user.token[\"resource_id\"] == user.token.get(\"user\", {}).get(\"id\")\n ):\n validated_data[\"video_id\"] = self.initial_data.get(\"video\")\n # If the user just has a token for a video, force the video ID on the timed text track\n if not validated_data.get(\"video_id\") and isinstance(user, TokenUser):\n validated_data[\"video_id\"] = user.id\n return super().create(validated_data)\n\n def _sign_url(self, url):\n \"\"\"Generate a presigned cloudfront url.\n\n Parameters\n ----------\n url: string\n The url to sign\n\n Returns:\n string\n The signed url\n\n \"\"\"\n date_less_than = timezone.now() + timedelta(\n seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY\n )\n cloudfront_signer = CloudFrontSigner(\n settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer\n )\n return cloudfront_signer.generate_presigned_url(\n url, date_less_than=date_less_than\n )\n\n def _generate_url(self, obj, 
object_path, extension=None, content_disposition=None):\n \"\"\"Generate an url to fetch a timed text track file depending on argument passed.\n\n Parameters:\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n object_patch: string\n The path in the path the timed text track is stored\n\n extension: string or None\n If the timed text track need an extension in the url, add it to the end\n\n content_disposition: string or None\n Add a response-content-disposition query string to url if present\n \"\"\"\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}\"\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n mode = f\"_{obj.mode}\" if obj.mode else \"\"\n url = f\"{base}/{object_path}/{stamp}_{obj.language:s}{mode:s}\"\n if extension:\n url = f\"{url}.{extension}\"\n\n if content_disposition:\n url = f\"{url}?response-content-disposition={content_disposition}\"\n return url\n\n def get_source_url(self, obj):\n \"\"\"Source url of the timed text track, signed with a CloudFront key if activated.\n\n This is the url of the uploaded file without any modification.\n\n Parameters\n ----------\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n Returns\n -------\n string or None\n The url for the timed text track uploaded without modification.\n None if the timed text track is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on and obj.extension:\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n filename = f\"{slugify(obj.video.playlist.title)}_{stamp}.{obj.extension}\"\n url = self._generate_url(\n obj,\n \"timedtext/source\",\n content_disposition=quote_plus(f\"attachment; filename={filename}\"),\n )\n\n # Sign the url only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n url = self._sign_url(url)\n return url\n return None\n\n def get_url(self, obj):\n \"\"\"Url of the timed text track, signed with a CloudFront key if activated.\n\n Parameters\n ----------\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n Returns\n -------\n string or None\n The url for the timed text track converted to vtt.\n None if the timed text track is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on:\n\n url = self._generate_url(obj, \"timedtext\", extension=\"vtt\")\n\n # Sign the url only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n url = self._sign_url(url)\n return url\n return None\n\n\nclass ThumbnailSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to display a thumbnail.\"\"\"\n\n class Meta: # noqa\n model = Thumbnail\n fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"urls\",\n \"video\",\n )\n read_only_fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"urls\",\n \"video\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n is_ready_to_show = serializers.BooleanField(read_only=True)\n urls = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Force the video field to the video of the JWT Token if any.\n\n Parameters\n ----------\n validated_data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The 
\"validated_data\" dictionary is returned after modification.\n\n \"\"\"\n # user here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n if not validated_data.get(\"video_id\") and isinstance(user, TokenUser):\n validated_data[\"video_id\"] = user.id\n return super().create(validated_data)\n\n def get_urls(self, obj):\n \"\"\"Urls of the thumbnail.\n\n Parameters\n ----------\n obj : Type[models.Thumbnail]\n The thumbnail that we want to serialize\n\n Returns\n -------\n Dict or None\n The urls for the thumbnail.\n None if the thumbnail is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on:\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}\"\n urls = {}\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n for resolution in settings.VIDEO_RESOLUTIONS:\n urls[resolution] = f\"{base}/thumbnails/{stamp}_{resolution}.jpg\"\n return urls\n return None\n\n\nclass UpdateLiveStateSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the UpdateLiveState API endpoint.\"\"\"\n\n state = serializers.ChoiceField(\n tuple(c for c in LIVE_CHOICES if c[0] in (RUNNING, STOPPED))\n )\n logGroupName = serializers.CharField()\n requestId = serializers.CharField()\n\n\nclass InitLiveStateSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the initiate-live API endpoint.\"\"\"\n\n type = serializers.ChoiceField(LIVE_TYPE_CHOICES)\n\n\nclass LivePairingSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for LivePairing model.\"\"\"\n\n class Meta: # noqa\n model = LivePairing\n fields = (\"secret\", \"expires_in\")\n read_only_fields = (\"secret\", \"expires_in\")\n\n expires_in = serializers.SerializerMethodField()\n\n # pylint: disable=unused-argument\n def get_expires_in(self, obj):\n \"\"\"Returns LivePairing expiration setting.\"\"\"\n return settings.LIVE_PAIRING_EXPIRATION_SECONDS\n\n\nclass PairingChallengeSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the PairingChallenge API endpoint.\"\"\"\n\n box_id = serializers.UUIDField()\n secret = serializers.CharField(min_length=6, max_length=6)\n\n\nclass LiveRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for liveRegistration model.\"\"\"\n\n class Meta: # noqa\n model = LiveRegistration\n fields = (\n \"email\",\n \"id\",\n \"consumer_site\",\n \"lti_user_id\",\n \"should_send_reminders\",\n \"video\",\n )\n read_only_fields = (\n \"id\",\n \"consumer_site\",\n \"lti_user_id\",\n \"video\",\n )\n\n # Make sure video UUID is converted to a string during serialization\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n\n def validate(self, attrs):\n \"\"\"Control or set data with token informations.\n\n Force the video field to the video of the JWT Token if any.\n Check email, if present in the token, is equal to the one in the request.\n Set lti informations if they are present in the token. 
Control integrity\n errors and set specific messages.\n\n Parameters\n ----------\n data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The \"data\" dictionary is returned after modification.\n\n \"\"\"\n # User here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n video = get_object_or_404(Video, pk=user.id)\n if video.is_scheduled is False:\n raise serializers.ValidationError(\n {\"video\": f\"video with id {user.id} doesn't accept registration.\"}\n )\n\n if not attrs.get(\"video_id\") and isinstance(user, TokenUser):\n attrs[\"video_id\"] = user.id\n # consumer_site is defined if context_id exists in the token\n attrs[\"consumer_site\"] = (\n Playlist.objects.get(\n lti_id=user.token.payload[\"context_id\"]\n ).consumer_site\n if user.token.payload.get(\"context_id\")\n else None\n )\n\n if user.token.payload.get(\"user\"):\n attrs[\"lti_user_id\"] = user.token.payload[\"user\"][\"id\"]\n\n # If email is present in token, we make sure the one sent is the one expected\n if user.token.payload[\"user\"].get(\"email\"):\n if attrs[\"email\"] != user.token.payload[\"user\"].get(\"email\"):\n raise serializers.ValidationError(\n {\n \"email\": \"You are not authorized to register with a specific email\"\n f\" {attrs['email']}. You can only use the email from your \"\n \"authentication.\"\n }\n )\n\n # We can identify the user for this context_id, we make sure this user hasn't\n # already registered for this video. It's only relevant if context_id is defined.\n if (\n user.token.payload.get(\"context_id\")\n and LiveRegistration.objects.filter(\n consumer_site=attrs[\"consumer_site\"],\n deleted=None,\n lti_user_id=attrs[\"lti_user_id\"],\n video=video,\n ).exists()\n ):\n raise serializers.ValidationError(\n {\n \"lti_user_id\": \"This identified user is already \"\n \"registered for this video and consumer site.\"\n }\n )\n\n # Controls this email hasn't already been used for this video and this consumer\n # site. 
Consumer site can be defined or not, in both case, it will raise the same\n # error.\n if LiveRegistration.objects.filter(\n consumer_site=attrs[\"consumer_site\"],\n deleted=None,\n email=attrs[\"email\"],\n video=video,\n ).exists():\n raise serializers.ValidationError(\n {\n \"email\": f\"{attrs['email']} is already \"\n \"registered for this video and consumer site.\"\n }\n )\n\n return super().validate(attrs)\n\n\nclass VideoBaseSerializer(serializers.ModelSerializer):\n \"\"\"Base Serializer to factorize common Video attributes.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"urls\",\n \"thumbnail\",\n \"is_ready_to_show\",\n )\n read_only_fields = (\n \"urls\",\n \"is_ready_to_show\",\n )\n\n urls = serializers.SerializerMethodField()\n thumbnail = ThumbnailSerializer(read_only=True, allow_null=True)\n is_ready_to_show = serializers.BooleanField(read_only=True)\n\n def get_urls(self, obj):\n \"\"\"Urls of the video for each type of encoding.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionary or None\n A dictionary of all urls for:\n - mp4 encodings of the video in each resolution\n - jpeg thumbnails of the video in each resolution\n - manifest of the HLS encodings of the video\n For a video in live mode only the HLS url is added\n None if the video is still not uploaded to S3 with success\n\n \"\"\"\n if obj.live_info is not None:\n # Adaptive Bit Rate manifests\n return {\n \"manifests\": {\n \"hls\": obj.live_info[\"mediapackage\"][\"endpoints\"][\"hls\"][\"url\"],\n },\n \"mp4\": {},\n \"thumbnails\": {},\n }\n\n if obj.uploaded_on is None:\n return None\n\n thumbnail_urls = {}\n try:\n thumbnail = obj.thumbnail\n except Thumbnail.DoesNotExist:\n pass\n else:\n if thumbnail.uploaded_on is not None:\n thumbnail_serialized = ThumbnailSerializer(thumbnail)\n thumbnail_urls.update(thumbnail_serialized.data.get(\"urls\"))\n\n urls = {\"mp4\": {}, \"thumbnails\": {}}\n\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.pk}\"\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n\n date_less_than = timezone.now() + timedelta(\n seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY\n )\n filename = f\"{slugify(obj.playlist.title)}_{stamp}.mp4\"\n content_disposition = quote_plus(f\"attachment; filename={filename}\")\n for resolution in obj.resolutions:\n # MP4\n mp4_url = (\n f\"{base}/mp4/{stamp}_{resolution}.mp4\"\n f\"?response-content-disposition={content_disposition}\"\n )\n\n # Thumbnails\n urls[\"thumbnails\"][resolution] = thumbnail_urls.get(\n resolution,\n f\"{base}/thumbnails/{stamp}_{resolution}.0000000.jpg\",\n )\n\n # Sign the urls of mp4 videos only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n cloudfront_signer = CloudFrontSigner(\n settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer\n )\n mp4_url = cloudfront_signer.generate_presigned_url(\n mp4_url, date_less_than=date_less_than\n )\n\n urls[\"mp4\"][resolution] = mp4_url\n\n # Adaptive Bit Rate manifests\n urls[\"manifests\"] = {\n \"hls\": f\"{base}/cmaf/{stamp}.m3u8\",\n }\n\n # Previews\n urls[\"previews\"] = f\"{base}/previews/{stamp}_100.jpg\"\n\n return urls\n\n\nclass VideoSerializer(VideoBaseSerializer):\n \"\"\"Serializer to display a video model with all its resolution options.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"active_stamp\",\n \"description\",\n \"id\",\n \"is_ready_to_show\",\n \"is_scheduled\",\n \"timed_text_tracks\",\n 
\"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"show_download\",\n \"should_use_subtitle_as_transcript\",\n \"starting_at\",\n \"has_transcript\",\n \"playlist\",\n \"live_info\",\n \"live_state\",\n \"live_type\",\n \"xmpp\",\n )\n read_only_fields = (\n \"id\",\n \"active_stamp\",\n \"is_ready_to_show\",\n \"is_scheduled\",\n \"urls\",\n \"has_transcript\",\n \"live_info\",\n \"live_state\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n timed_text_tracks = TimedTextTrackSerializer(\n source=\"timedtexttracks\", many=True, read_only=True\n )\n playlist = PlaylistLiteSerializer(read_only=True)\n has_transcript = serializers.SerializerMethodField()\n live_info = serializers.SerializerMethodField()\n xmpp = serializers.SerializerMethodField()\n\n def validate_starting_at(self, value):\n \"\"\"Add extra controls for starting_at field.\"\"\"\n # Field starting_at has a new value\n if value != self.instance.starting_at:\n # New value is past, it can't be updated\n if value is not None and value < timezone.now():\n raise serializers.ValidationError(\n f\"{value} is not a valid date, date should be planned after!\"\n )\n # Check live_state is in IDLE state as expected when scheduling a live\n if self.instance.live_state != IDLE:\n raise serializers.ValidationError(\n \"Field starting_at can't be changed, video live is \"\n \"not in default mode.\"\n )\n # Initial value is already past, it can't be updated anymore\n if (\n self.instance.starting_at is not None\n and self.instance.starting_at < timezone.now()\n ):\n raise serializers.ValidationError(\n f\"Field starting_at {self.instance.starting_at} is already \"\n \"past and can't be updated!\"\n )\n\n return value\n\n def get_xmpp(self, obj):\n \"\"\"Chat info.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionnary\n A dictionary containing all info needed to manage a connection to a xmpp server.\n \"\"\"\n user_id = self.context.get(\"user\", {}).get(\"id\") or self.context.get(\n \"session_id\"\n )\n if settings.LIVE_CHAT_ENABLED and user_id and obj.live_state is not None:\n roles = self.context.get(\"roles\", [])\n is_admin = bool(LTI_ROLES[ADMINISTRATOR] & set(roles))\n is_instructor = bool(LTI_ROLES[INSTRUCTOR] & set(roles))\n token = xmpp_utils.generate_jwt(\n str(obj.id),\n user_id,\n \"owner\" if is_admin or is_instructor else \"member\",\n timezone.now() + timedelta(days=1),\n )\n\n return {\n \"bosh_url\": xmpp_utils.add_jwt_token_to_url(\n settings.XMPP_BOSH_URL, token\n ),\n \"websocket_url\": xmpp_utils.add_jwt_token_to_url(\n settings.XMPP_WEBSOCKET_URL, token\n ),\n \"conference_url\": f\"{obj.id}@{settings.XMPP_CONFERENCE_DOMAIN}\",\n \"jid\": settings.XMPP_DOMAIN,\n }\n\n return None\n\n def get_live_info(self, obj):\n \"\"\"Live streaming informations.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionnary\n A dictionnary containing all info needed to manage a live stream for an admin.\n For other users, an empty dictionnary is returned.\n The data are filtered to only return RTMP endpoints and jitsi configuration if needed.\n All other data are sensitive, used only by the backend and must never be exposed.\n \"\"\"\n can_return_live_info = self.context.get(\"can_return_live_info\", False)\n\n if obj.live_state is None:\n return {}\n\n live_info = {}\n\n if obj.live_info is not None and 
obj.live_info.get(\"paused_at\"):\n live_info.update({\"paused_at\": obj.live_info[\"paused_at\"]})\n\n if can_return_live_info is False:\n return live_info\n\n if obj.live_info is not None:\n live_info.update(\n {\n \"medialive\": {\n \"input\": {\n \"endpoints\": obj.live_info[\"medialive\"][\"input\"][\n \"endpoints\"\n ],\n }\n },\n }\n )\n\n if obj.live_type == JITSI:\n live_info.update(\n {\n \"jitsi\": {\n \"external_api_url\": settings.JITSI_EXTERNAL_API_URL,\n \"domain\": settings.JITSI_DOMAIN,\n \"config_overwrite\": settings.JITSI_CONFIG_OVERWRITE,\n \"interface_config_overwrite\": settings.JITSI_INTERFACE_CONFIG_OVERWRITE,\n }\n }\n )\n\n return live_info\n\n def get_has_transcript(self, obj):\n \"\"\"Compute if should_use_subtitle_as_transcript behavior is disabled.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Boolean\n If there is at least one transcript ready to be shown the method will return True.\n Returns False otherwise.\n \"\"\"\n return obj.timedtexttracks.filter(mode=\"ts\", uploaded_on__isnull=False).exists()\n\n\nclass VideoSelectLTISerializer(VideoBaseSerializer):\n \"\"\"A serializer to display a Video resource for LTI select content request.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"id\",\n \"is_ready_to_show\",\n \"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"lti_url\",\n )\n read_only_fields = (\n \"id\",\n \"is_ready_to_show\",\n \"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"lti_url\",\n )\n\n lti_url = serializers.SerializerMethodField()\n\n def get_lti_url(self, obj):\n \"\"\"LTI Url of the Video.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The document that we want to serialize\n\n Returns\n -------\n String\n the LTI url to be used by LTI consumers\n\n \"\"\"\n return build_absolute_uri_behind_proxy(\n self.context[\"request\"],\n reverse(\"video_lti_view\", args=[obj.id]),\n )\n", "path": "src/backend/marsha/core/serializers/video.py" } ]
[ { "content": "\"\"\"Structure of Video related models API responses with Django Rest Framework serializers.\"\"\"\nfrom datetime import timedelta\nfrom urllib.parse import quote_plus\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\nfrom botocore.signers import CloudFrontSigner\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.models import TokenUser\n\nfrom ..defaults import IDLE, JITSI, LIVE_CHOICES, LIVE_TYPE_CHOICES, RUNNING, STOPPED\nfrom ..models import (\n LivePairing,\n LiveRegistration,\n Playlist,\n Thumbnail,\n TimedTextTrack,\n Video,\n)\nfrom ..models.account import ADMINISTRATOR, INSTRUCTOR, LTI_ROLES\nfrom ..utils import cloudfront_utils, time_utils, xmpp_utils\nfrom ..utils.url_utils import build_absolute_uri_behind_proxy\nfrom .base import TimestampField\nfrom .playlist import PlaylistLiteSerializer\n\n\nclass TimedTextTrackSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to display a timed text track model.\"\"\"\n\n class Meta: # noqa\n model = TimedTextTrack\n fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"mode\",\n \"language\",\n \"upload_state\",\n \"url\",\n \"source_url\",\n \"video\",\n )\n read_only_fields = (\n \"id\",\n \"active_stamp\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"url\",\n \"video\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n url = serializers.SerializerMethodField()\n source_url = serializers.SerializerMethodField()\n # Make sure video UUID is converted to a string during serialization\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n is_ready_to_show = serializers.BooleanField(read_only=True)\n\n def create(self, validated_data):\n \"\"\"Force the video field to the video of the JWT Token if any.\n\n Parameters\n ----------\n validated_data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The \"validated_data\" dictionary is returned after modification.\n\n \"\"\"\n # user here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n # Set the video field from the payload if there is one and the user is identified\n # as a proper user object through access rights\n if (\n self.initial_data.get(\"video\")\n and user.token.get(\"user\")\n and user.token[\"resource_id\"] == user.token.get(\"user\", {}).get(\"id\")\n ):\n validated_data[\"video_id\"] = self.initial_data.get(\"video\")\n # If the user just has a token for a video, force the video ID on the timed text track\n if not validated_data.get(\"video_id\") and isinstance(user, TokenUser):\n validated_data[\"video_id\"] = user.id\n return super().create(validated_data)\n\n def _sign_url(self, url):\n \"\"\"Generate a presigned cloudfront url.\n\n Parameters\n ----------\n url: string\n The url to sign\n\n Returns:\n string\n The signed url\n\n \"\"\"\n date_less_than = timezone.now() + timedelta(\n seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY\n )\n cloudfront_signer = CloudFrontSigner(\n settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer\n )\n return cloudfront_signer.generate_presigned_url(\n url, date_less_than=date_less_than\n )\n\n def _generate_url(self, obj, 
object_path, extension=None, content_disposition=None):\n \"\"\"Generate an url to fetch a timed text track file depending on argument passed.\n\n Parameters:\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n object_patch: string\n The path in the path the timed text track is stored\n\n extension: string or None\n If the timed text track need an extension in the url, add it to the end\n\n content_disposition: string or None\n Add a response-content-disposition query string to url if present\n \"\"\"\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}\"\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n mode = f\"_{obj.mode}\" if obj.mode else \"\"\n url = f\"{base}/{object_path}/{stamp}_{obj.language:s}{mode:s}\"\n if extension:\n url = f\"{url}.{extension}\"\n\n if content_disposition:\n url = f\"{url}?response-content-disposition={content_disposition}\"\n return url\n\n def get_source_url(self, obj):\n \"\"\"Source url of the timed text track, signed with a CloudFront key if activated.\n\n This is the url of the uploaded file without any modification.\n\n Parameters\n ----------\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n Returns\n -------\n string or None\n The url for the timed text track uploaded without modification.\n None if the timed text track is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on and obj.extension:\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n filename = f\"{slugify(obj.video.playlist.title)}_{stamp}.{obj.extension}\"\n url = self._generate_url(\n obj,\n \"timedtext/source\",\n content_disposition=quote_plus(f\"attachment; filename={filename}\"),\n )\n\n # Sign the url only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n url = self._sign_url(url)\n return url\n return None\n\n def get_url(self, obj):\n \"\"\"Url of the timed text track, signed with a CloudFront key if activated.\n\n Parameters\n ----------\n obj : Type[models.TimedTextTrack]\n The timed text track that we want to serialize\n\n Returns\n -------\n string or None\n The url for the timed text track converted to vtt.\n None if the timed text track is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on:\n\n url = self._generate_url(obj, \"timedtext\", extension=\"vtt\")\n\n # Sign the url only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n url = self._sign_url(url)\n return url\n return None\n\n\nclass ThumbnailSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to display a thumbnail.\"\"\"\n\n class Meta: # noqa\n model = Thumbnail\n fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"urls\",\n \"video\",\n )\n read_only_fields = (\n \"active_stamp\",\n \"id\",\n \"is_ready_to_show\",\n \"upload_state\",\n \"urls\",\n \"video\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n is_ready_to_show = serializers.BooleanField(read_only=True)\n urls = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Force the video field to the video of the JWT Token if any.\n\n Parameters\n ----------\n validated_data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The 
\"validated_data\" dictionary is returned after modification.\n\n \"\"\"\n # user here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n if not validated_data.get(\"video_id\") and isinstance(user, TokenUser):\n validated_data[\"video_id\"] = user.id\n return super().create(validated_data)\n\n def get_urls(self, obj):\n \"\"\"Urls of the thumbnail.\n\n Parameters\n ----------\n obj : Type[models.Thumbnail]\n The thumbnail that we want to serialize\n\n Returns\n -------\n Dict or None\n The urls for the thumbnail.\n None if the thumbnail is still not uploaded to S3 with success.\n\n \"\"\"\n if obj.uploaded_on:\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.video.pk}\"\n urls = {}\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n for resolution in settings.VIDEO_RESOLUTIONS:\n urls[resolution] = f\"{base}/thumbnails/{stamp}_{resolution}.jpg\"\n return urls\n return None\n\n\nclass UpdateLiveStateSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the UpdateLiveState API endpoint.\"\"\"\n\n state = serializers.ChoiceField(\n tuple(c for c in LIVE_CHOICES if c[0] in (RUNNING, STOPPED))\n )\n logGroupName = serializers.CharField()\n requestId = serializers.CharField()\n\n\nclass InitLiveStateSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the initiate-live API endpoint.\"\"\"\n\n type = serializers.ChoiceField(LIVE_TYPE_CHOICES)\n\n\nclass LivePairingSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for LivePairing model.\"\"\"\n\n class Meta: # noqa\n model = LivePairing\n fields = (\"secret\", \"expires_in\")\n read_only_fields = (\"secret\", \"expires_in\")\n\n expires_in = serializers.SerializerMethodField()\n\n # pylint: disable=unused-argument\n def get_expires_in(self, obj):\n \"\"\"Returns LivePairing expiration setting.\"\"\"\n return settings.LIVE_PAIRING_EXPIRATION_SECONDS\n\n\nclass PairingChallengeSerializer(serializers.Serializer):\n \"\"\"A serializer to validate data submitted on the PairingChallenge API endpoint.\"\"\"\n\n box_id = serializers.UUIDField()\n secret = serializers.CharField(min_length=6, max_length=6)\n\n\nclass LiveRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for liveRegistration model.\"\"\"\n\n class Meta: # noqa\n model = LiveRegistration\n fields = (\n \"email\",\n \"id\",\n \"consumer_site\",\n \"lti_user_id\",\n \"should_send_reminders\",\n \"video\",\n )\n read_only_fields = (\n \"id\",\n \"consumer_site\",\n \"lti_user_id\",\n \"video\",\n )\n\n # Make sure video UUID is converted to a string during serialization\n video = serializers.PrimaryKeyRelatedField(\n read_only=True, pk_field=serializers.CharField()\n )\n\n def validate(self, attrs):\n \"\"\"Control or set data with token informations.\n\n Force the video field to the video of the JWT Token if any.\n Check email, if present in the token, is equal to the one in the request.\n Set lti informations if they are present in the token. 
Control integrity\n errors and set specific messages.\n\n Parameters\n ----------\n data : dictionary\n Dictionary of the deserialized values of each field after validation.\n\n Returns\n -------\n dictionary\n The \"data\" dictionary is returned after modification.\n\n \"\"\"\n # User here is a video as it comes from the JWT\n # It is named \"user\" by convention in the `rest_framework_simplejwt` dependency we use.\n user = self.context[\"request\"].user\n video = get_object_or_404(Video, pk=user.id)\n if video.is_scheduled is False:\n raise serializers.ValidationError(\n {\"video\": f\"video with id {user.id} doesn't accept registration.\"}\n )\n\n if not attrs.get(\"video_id\") and isinstance(user, TokenUser):\n attrs[\"video_id\"] = user.id\n # consumer_site is defined if context_id exists in the token\n attrs[\"consumer_site\"] = (\n Playlist.objects.get(\n lti_id=user.token.payload[\"context_id\"]\n ).consumer_site\n if user.token.payload.get(\"context_id\")\n else None\n )\n\n if user.token.payload.get(\"user\"):\n attrs[\"lti_user_id\"] = user.token.payload[\"user\"][\"id\"]\n\n # If email is present in token, we make sure the one sent is the one expected\n if user.token.payload[\"user\"].get(\"email\"):\n if attrs[\"email\"] != user.token.payload[\"user\"].get(\"email\"):\n raise serializers.ValidationError(\n {\n \"email\": \"You are not authorized to register with a specific email\"\n f\" {attrs['email']}. You can only use the email from your \"\n \"authentication.\"\n }\n )\n\n # We can identify the user for this context_id, we make sure this user hasn't\n # already registered for this video. It's only relevant if context_id is defined.\n if (\n user.token.payload.get(\"context_id\")\n and LiveRegistration.objects.filter(\n consumer_site=attrs[\"consumer_site\"],\n deleted=None,\n lti_user_id=attrs[\"lti_user_id\"],\n video=video,\n ).exists()\n ):\n raise serializers.ValidationError(\n {\n \"lti_user_id\": \"This identified user is already \"\n \"registered for this video and consumer site.\"\n }\n )\n\n # Controls this email hasn't already been used for this video and this consumer\n # site. 
Consumer site can be defined or not, in both case, it will raise the same\n # error.\n if LiveRegistration.objects.filter(\n consumer_site=attrs[\"consumer_site\"],\n deleted=None,\n email=attrs[\"email\"],\n video=video,\n ).exists():\n raise serializers.ValidationError(\n {\n \"email\": f\"{attrs['email']} is already \"\n \"registered for this video and consumer site.\"\n }\n )\n\n return super().validate(attrs)\n\n\nclass VideoBaseSerializer(serializers.ModelSerializer):\n \"\"\"Base Serializer to factorize common Video attributes.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"urls\",\n \"thumbnail\",\n \"is_ready_to_show\",\n )\n read_only_fields = (\n \"urls\",\n \"is_ready_to_show\",\n )\n\n urls = serializers.SerializerMethodField()\n thumbnail = ThumbnailSerializer(read_only=True, allow_null=True)\n is_ready_to_show = serializers.BooleanField(read_only=True)\n\n def get_urls(self, obj):\n \"\"\"Urls of the video for each type of encoding.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionary or None\n A dictionary of all urls for:\n - mp4 encodings of the video in each resolution\n - jpeg thumbnails of the video in each resolution\n - manifest of the HLS encodings of the video\n For a video in live mode only the HLS url is added\n None if the video is still not uploaded to S3 with success\n\n \"\"\"\n if obj.live_info is not None:\n # Adaptive Bit Rate manifests\n return {\n \"manifests\": {\n \"hls\": obj.live_info[\"mediapackage\"][\"endpoints\"][\"hls\"][\"url\"],\n },\n \"mp4\": {},\n \"thumbnails\": {},\n }\n\n if obj.uploaded_on is None:\n return None\n\n thumbnail_urls = {}\n try:\n thumbnail = obj.thumbnail\n except Thumbnail.DoesNotExist:\n pass\n else:\n if thumbnail.uploaded_on is not None:\n thumbnail_serialized = ThumbnailSerializer(thumbnail)\n thumbnail_urls.update(thumbnail_serialized.data.get(\"urls\"))\n\n urls = {\"mp4\": {}, \"thumbnails\": {}}\n\n base = f\"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.pk}\"\n stamp = time_utils.to_timestamp(obj.uploaded_on)\n\n date_less_than = timezone.now() + timedelta(\n seconds=settings.CLOUDFRONT_SIGNED_URLS_VALIDITY\n )\n filename = f\"{slugify(obj.playlist.title)}_{stamp}.mp4\"\n content_disposition = quote_plus(f\"attachment; filename={filename}\")\n for resolution in obj.resolutions:\n # MP4\n mp4_url = (\n f\"{base}/mp4/{stamp}_{resolution}.mp4\"\n f\"?response-content-disposition={content_disposition}\"\n )\n\n # Thumbnails\n urls[\"thumbnails\"][resolution] = thumbnail_urls.get(\n resolution,\n f\"{base}/thumbnails/{stamp}_{resolution}.0000000.jpg\",\n )\n\n # Sign the urls of mp4 videos only if the functionality is activated\n if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:\n cloudfront_signer = CloudFrontSigner(\n settings.CLOUDFRONT_ACCESS_KEY_ID, cloudfront_utils.rsa_signer\n )\n mp4_url = cloudfront_signer.generate_presigned_url(\n mp4_url, date_less_than=date_less_than\n )\n\n urls[\"mp4\"][resolution] = mp4_url\n\n # Adaptive Bit Rate manifests\n urls[\"manifests\"] = {\n \"hls\": f\"{base}/cmaf/{stamp}.m3u8\",\n }\n\n # Previews\n urls[\"previews\"] = f\"{base}/previews/{stamp}_100.jpg\"\n\n return urls\n\n\nclass VideoSerializer(VideoBaseSerializer):\n \"\"\"Serializer to display a video model with all its resolution options.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"active_stamp\",\n \"description\",\n \"id\",\n \"is_public\",\n \"is_ready_to_show\",\n \"is_scheduled\",\n 
\"timed_text_tracks\",\n \"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"show_download\",\n \"should_use_subtitle_as_transcript\",\n \"starting_at\",\n \"has_transcript\",\n \"playlist\",\n \"live_info\",\n \"live_state\",\n \"live_type\",\n \"xmpp\",\n )\n read_only_fields = (\n \"id\",\n \"active_stamp\",\n \"is_ready_to_show\",\n \"is_scheduled\",\n \"urls\",\n \"has_transcript\",\n \"live_info\",\n \"live_state\",\n )\n\n active_stamp = TimestampField(\n source=\"uploaded_on\", required=False, allow_null=True, read_only=True\n )\n timed_text_tracks = TimedTextTrackSerializer(\n source=\"timedtexttracks\", many=True, read_only=True\n )\n playlist = PlaylistLiteSerializer(read_only=True)\n has_transcript = serializers.SerializerMethodField()\n live_info = serializers.SerializerMethodField()\n xmpp = serializers.SerializerMethodField()\n\n def validate_starting_at(self, value):\n \"\"\"Add extra controls for starting_at field.\"\"\"\n # Field starting_at has a new value\n if value != self.instance.starting_at:\n # New value is past, it can't be updated\n if value is not None and value < timezone.now():\n raise serializers.ValidationError(\n f\"{value} is not a valid date, date should be planned after!\"\n )\n # Check live_state is in IDLE state as expected when scheduling a live\n if self.instance.live_state != IDLE:\n raise serializers.ValidationError(\n \"Field starting_at can't be changed, video live is \"\n \"not in default mode.\"\n )\n # Initial value is already past, it can't be updated anymore\n if (\n self.instance.starting_at is not None\n and self.instance.starting_at < timezone.now()\n ):\n raise serializers.ValidationError(\n f\"Field starting_at {self.instance.starting_at} is already \"\n \"past and can't be updated!\"\n )\n\n return value\n\n def get_xmpp(self, obj):\n \"\"\"Chat info.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionnary\n A dictionary containing all info needed to manage a connection to a xmpp server.\n \"\"\"\n user_id = self.context.get(\"user\", {}).get(\"id\") or self.context.get(\n \"session_id\"\n )\n if settings.LIVE_CHAT_ENABLED and user_id and obj.live_state is not None:\n roles = self.context.get(\"roles\", [])\n is_admin = bool(LTI_ROLES[ADMINISTRATOR] & set(roles))\n is_instructor = bool(LTI_ROLES[INSTRUCTOR] & set(roles))\n token = xmpp_utils.generate_jwt(\n str(obj.id),\n user_id,\n \"owner\" if is_admin or is_instructor else \"member\",\n timezone.now() + timedelta(days=1),\n )\n\n return {\n \"bosh_url\": xmpp_utils.add_jwt_token_to_url(\n settings.XMPP_BOSH_URL, token\n ),\n \"websocket_url\": xmpp_utils.add_jwt_token_to_url(\n settings.XMPP_WEBSOCKET_URL, token\n ),\n \"conference_url\": f\"{obj.id}@{settings.XMPP_CONFERENCE_DOMAIN}\",\n \"jid\": settings.XMPP_DOMAIN,\n }\n\n return None\n\n def get_live_info(self, obj):\n \"\"\"Live streaming informations.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Dictionnary\n A dictionnary containing all info needed to manage a live stream for an admin.\n For other users, an empty dictionnary is returned.\n The data are filtered to only return RTMP endpoints and jitsi configuration if needed.\n All other data are sensitive, used only by the backend and must never be exposed.\n \"\"\"\n can_return_live_info = self.context.get(\"can_return_live_info\", False)\n\n if obj.live_state is None:\n return {}\n\n live_info = {}\n\n if obj.live_info 
is not None and obj.live_info.get(\"paused_at\"):\n live_info.update({\"paused_at\": obj.live_info[\"paused_at\"]})\n\n if can_return_live_info is False:\n return live_info\n\n if obj.live_info is not None:\n live_info.update(\n {\n \"medialive\": {\n \"input\": {\n \"endpoints\": obj.live_info[\"medialive\"][\"input\"][\n \"endpoints\"\n ],\n }\n },\n }\n )\n\n if obj.live_type == JITSI:\n live_info.update(\n {\n \"jitsi\": {\n \"external_api_url\": settings.JITSI_EXTERNAL_API_URL,\n \"domain\": settings.JITSI_DOMAIN,\n \"config_overwrite\": settings.JITSI_CONFIG_OVERWRITE,\n \"interface_config_overwrite\": settings.JITSI_INTERFACE_CONFIG_OVERWRITE,\n }\n }\n )\n\n return live_info\n\n def get_has_transcript(self, obj):\n \"\"\"Compute if should_use_subtitle_as_transcript behavior is disabled.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The video that we want to serialize\n\n Returns\n -------\n Boolean\n If there is at least one transcript ready to be shown the method will return True.\n Returns False otherwise.\n \"\"\"\n return obj.timedtexttracks.filter(mode=\"ts\", uploaded_on__isnull=False).exists()\n\n\nclass VideoSelectLTISerializer(VideoBaseSerializer):\n \"\"\"A serializer to display a Video resource for LTI select content request.\"\"\"\n\n class Meta: # noqa\n model = Video\n fields = (\n \"id\",\n \"is_ready_to_show\",\n \"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"lti_url\",\n )\n read_only_fields = (\n \"id\",\n \"is_ready_to_show\",\n \"thumbnail\",\n \"title\",\n \"upload_state\",\n \"urls\",\n \"lti_url\",\n )\n\n lti_url = serializers.SerializerMethodField()\n\n def get_lti_url(self, obj):\n \"\"\"LTI Url of the Video.\n\n Parameters\n ----------\n obj : Type[models.Video]\n The document that we want to serialize\n\n Returns\n -------\n String\n the LTI url to be used by LTI consumers\n\n \"\"\"\n return build_absolute_uri_behind_proxy(\n self.context[\"request\"],\n reverse(\"video_lti_view\", args=[obj.id]),\n )\n", "path": "src/backend/marsha/core/serializers/video.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 59da34e4d4..81348b263b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Versioning](https://semver.org/spec/v2.0.0.html). - Add API endpoints to pair an external device to Jitsi live videos - Add a store in the frontend to control live layout - Add frontend components to pair an external device to Jitsi live videos +- Add public availability to video api ### Changed diff --git a/src/backend/marsha/core/serializers/video.py b/src/backend/marsha/core/serializers/video.py index 822fccdc3b..d494a98709 100644 --- a/src/backend/marsha/core/serializers/video.py +++ b/src/backend/marsha/core/serializers/video.py @@ -548,6 +548,7 @@ class Meta: # noqa "active_stamp", "description", "id", + "is_public", "is_ready_to_show", "is_scheduled", "timed_text_tracks", diff --git a/src/backend/marsha/core/tests/test_api_video.py b/src/backend/marsha/core/tests/test_api_video.py index cafc752ae0..535a01778f 100644 --- a/src/backend/marsha/core/tests/test_api_video.py +++ b/src/backend/marsha/core/tests/test_api_video.py @@ -226,6 +226,7 @@ def test_api_video_read_detail_token_user(self): "id": str(video.id), "title": video.title, "active_stamp": "1533686400", + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -337,6 +338,7 @@ def test_api_video_read_detail_token_user_no_active_stamp(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -389,6 +391,7 @@ def test_api_video_read_detail_token_user_not_sucessfully_uploaded(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -528,6 +531,7 @@ def test_api_video_read_detail_by_organization_admin(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -602,6 +606,7 @@ def test_api_video_read_detail_by_playlist_admin(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -726,6 +731,7 @@ def test_api_video_read_list_user_with_playlist_access(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -803,6 +809,7 @@ def test_api_video_read_list_user_with_organization_access(self): "description": video_1.description, "has_transcript": False, "id": str(video_1.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -828,6 +835,7 @@ def test_api_video_read_list_user_with_organization_access(self): "description": video_2.description, "has_transcript": False, "id": str(video_2.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -919,6 +927,7 @@ def test_api_video_read_list_by_playlist_user_with_playlist_access(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -985,6 +994,7 @@ def test_api_video_read_list_by_playlist_user_with_org_access(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, 
"is_scheduled": False, "live_info": {}, @@ -1080,6 +1090,7 @@ def test_api_video_read_list_by_org_user_with_playlist_access(self): "description": video.description, "has_transcript": False, "id": str(video.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1145,6 +1156,7 @@ def test_api_video_read_list_by_org_user_with_org_access(self): "description": video_1.description, "has_transcript": False, "id": str(video_1.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1170,6 +1182,7 @@ def test_api_video_read_list_by_org_user_with_org_access(self): "description": video_2.description, "has_transcript": False, "id": str(video_2.id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1284,6 +1297,20 @@ def test_api_video_create_token_user_playlist_preexists(self): self.assertEqual(response.status_code, 401) self.assertFalse(models.Video.objects.exists()) + def test_api_video_create_student(self): + """Student users should not be able to create videos.""" + video = factories.VideoFactory() + jwt_token = AccessToken() + jwt_token.payload["resource_id"] = str(video.id) + jwt_token.payload["roles"] = ["student"] + jwt_token.payload["permissions"] = {"can_update": False} + response = self.client.post( + "/api/videos/", + HTTP_AUTHORIZATION=f"Bearer {jwt_token}", + ) + self.assertEqual(response.status_code, 403) + self.assertEqual(models.Video.objects.count(), 1) + def test_api_video_create_staff_or_user(self): """Users authenticated via a session should not be able to create videos.""" for user in [factories.UserFactory(), factories.UserFactory(is_staff=True)]: @@ -1333,6 +1360,7 @@ def test_api_video_create_by_playlist_admin(self): "description": "", "has_transcript": False, "id": str(models.Video.objects.get().id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1493,6 +1521,7 @@ def test_api_video_create_by_organization_admin(self): "description": "", "has_transcript": False, "id": str(models.Video.objects.get().id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1559,6 +1588,7 @@ def test_api_video_create_with_scheduled_date_gets_ignored(self): "description": "", "has_transcript": False, "id": str(models.Video.objects.get().id), + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "live_info": {}, @@ -1630,6 +1660,28 @@ def test_api_video_update_detail_anonymous(self): video.refresh_from_db() self.assertEqual(video.title, "my title") + def test_api_video_update_detail_student(self): + """Student users should not be allowed to update a video through the API.""" + video = factories.VideoFactory(title="my title") + jwt_token = AccessToken() + jwt_token.payload["resource_id"] = str(video.id) + jwt_token.payload["roles"] = ["student"] + + data = {"title": "my new title"} + response = self.client.put( + f"/api/videos/{video.id}/", + data, + content_type="application/json", + HTTP_AUTHORIZATION=f"Bearer {jwt_token}", + ) + self.assertEqual(response.status_code, 403) + content = json.loads(response.content) + self.assertEqual( + content, {"detail": "You do not have permission to perform this action."} + ) + video.refresh_from_db() + self.assertEqual(video.title, "my title") + def test_api_video_update_detail_token_user_title(self): """Token users should be able to update the title of their video through the API.""" video = factories.VideoFactory(title="my title") 
@@ -1886,6 +1938,41 @@ def test_api_video_instructor_update_video_in_read_only(self): ) self.assertEqual(response.status_code, 403) + def test_api_video_patch_video_anonymous(self): + """Anonymous users should not be allowed to patch a video through the API.""" + video = factories.VideoFactory(title="my title") + data = {"title": "my new title"} + response = self.client.patch( + f"/api/videos/{video.id}/", + data, + content_type="application/json", + ) + self.assertEqual(response.status_code, 401) + video.refresh_from_db() + self.assertEqual(video.title, "my title") + + def test_api_video_patch_video_student(self): + """Student users should not be allowed to patch a video through the API.""" + video = factories.VideoFactory(title="my title") + jwt_token = AccessToken() + jwt_token.payload["resource_id"] = str(video.id) + jwt_token.payload["roles"] = ["student"] + + data = {"title": "my new title"} + response = self.client.patch( + f"/api/videos/{video.id}/", + data, + content_type="application/json", + HTTP_AUTHORIZATION=f"Bearer {jwt_token}", + ) + self.assertEqual(response.status_code, 403) + content = json.loads(response.content) + self.assertEqual( + content, {"detail": "You do not have permission to perform this action."} + ) + video.refresh_from_db() + self.assertEqual(video.title, "my title") + def test_api_video_instructor_patch_video_in_read_only(self): """An instructor with read_only set to true should not be able to patch the video.""" video = factories.VideoFactory() @@ -1996,6 +2083,25 @@ def test_api_video_patch_detail_token_user_description(self): video.refresh_from_db() self.assertEqual(video.description, "my new description") + def test_api_video_patch_detail_token_user_is_public(self): + """Instructors and administrators should be able to + patch the public flag of their video through the API.""" + video = factories.VideoFactory(is_public=False) + jwt_token = AccessToken() + jwt_token.payload["resource_id"] = str(video.id) + jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])] + jwt_token.payload["permissions"] = {"can_update": True} + data = {"is_public": True} + response = self.client.patch( + f"/api/videos/{video.id}/", + data, + HTTP_AUTHORIZATION=f"Bearer {jwt_token}", + content_type="application/json", + ) + self.assertEqual(response.status_code, 200) + video.refresh_from_db() + self.assertTrue(video.is_public) + def test_api_video_patch_by_organization_instructor(self): """Organization instructors cannot patch videos on the API.""" user = factories.UserFactory() @@ -2472,6 +2578,24 @@ def test_api_video_delete_detail_token_user(self): self.assertEqual(response.status_code, 403) self.assertTrue(models.Video.objects.filter(id=video.id).exists()) + def test_api_video_delete_detail_student(self): + """Student users should not be able to delete a video.""" + video = factories.VideoFactory() + jwt_token = AccessToken() + jwt_token.payload["resource_id"] = str(video.id) + jwt_token.payload["roles"] = ["student"] + + response = self.client.delete( + f"/api/videos/{video.id}/", + HTTP_AUTHORIZATION=f"Bearer {jwt_token}", + ) + + self.assertEqual(response.status_code, 403) + content = json.loads(response.content) + self.assertEqual( + content, {"detail": "You do not have permission to perform this action."} + ) + def test_api_video_delete_detail_staff_or_user(self): """Users authenticated via a session should not be able to delete a video.""" video = factories.VideoFactory() @@ -3076,6 +3200,7 @@ def test_api_video_instructor_initiate_live(self): 
"id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3125,6 +3250,7 @@ def test_api_video_instructor_initiate_jitsi_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3226,6 +3352,7 @@ def test_api_instructor_start_non_created_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3345,6 +3472,7 @@ def test_api_instructor_start_already_created_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3547,6 +3675,7 @@ def test_api_video_instructor_stop_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3710,6 +3839,7 @@ def test_api_video_instructor_end_idle_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -3791,6 +3921,7 @@ def test_api_video_instructor_end_paused_live(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -3895,6 +4026,7 @@ def test_api_video_instructor_end_paused_live_missing_manifest(self): "id": str(video.id), "title": video.title, "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, diff --git a/src/backend/marsha/core/tests/test_views_lti_development.py b/src/backend/marsha/core/tests/test_views_lti_development.py index 76b8896c55..516f2ff90c 100644 --- a/src/backend/marsha/core/tests/test_views_lti_development.py +++ b/src/backend/marsha/core/tests/test_views_lti_development.py @@ -144,6 +144,7 @@ def test_views_lti_development_post_bypass_lti_instructor(self): context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -216,6 +217,7 @@ def test_views_lti_development_post_bypass_lti_instructor_no_video(self): context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, diff --git a/src/backend/marsha/core/tests/test_views_lti_video.py b/src/backend/marsha/core/tests/test_views_lti_video.py index c08c1f983e..f6484770c4 100644 --- a/src/backend/marsha/core/tests/test_views_lti_video.py +++ b/src/backend/marsha/core/tests/test_views_lti_video.py @@ -105,6 +105,7 @@ def test_views_lti_video_post_instructor(self, mock_get_consumer_site, mock_veri context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -227,6 +228,7 @@ def test_views_lti_video_instructor_live_mode_on( context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -376,6 +378,7 @@ def test_views_lti_video_instructor_live_mode_and_chat_on( context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ 
-517,6 +520,7 @@ def test_views_lti_video_student_live_mode_on( context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -640,6 +644,7 @@ def test_views_lti_video_post_administrator( context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -720,6 +725,7 @@ def test_views_lti_video_read_other_playlist( context.get("resource"), { "active_stamp": "1569309880", + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -848,6 +854,7 @@ def test_views_lti_video_restricted_resolutions_list( context.get("resource"), { "active_stamp": "1569309880", + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -964,6 +971,7 @@ def test_views_lti_video_harvested_upload_state( context.get("resource"), { "active_stamp": "1569309880", + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -1082,6 +1090,7 @@ def test_views_lti_video_post_student_with_video( context.get("resource"), { "active_stamp": "1569309880", + "is_public": False, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -1388,6 +1397,7 @@ def test_views_lti_video_has_transcript(self, mock_get_consumer_site, mock_verif context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, @@ -1478,6 +1488,7 @@ def test_views_lti_video_has_transcript_false( context.get("resource"), { "active_stamp": None, + "is_public": False, "is_ready_to_show": False, "is_scheduled": False, "show_download": True, diff --git a/src/backend/marsha/core/tests/test_views_public_video.py b/src/backend/marsha/core/tests/test_views_public_video.py index e0cc80b9e6..093dc505b8 100644 --- a/src/backend/marsha/core/tests/test_views_public_video.py +++ b/src/backend/marsha/core/tests/test_views_public_video.py @@ -60,6 +60,7 @@ def test_video_publicly_accessible(self): context.get("resource"), { "active_stamp": "1569309880", + "is_public": True, "is_ready_to_show": True, "is_scheduled": False, "show_download": True, @@ -235,6 +236,7 @@ def test_video_live_publicly_available(self): context.get("resource"), { "active_stamp": "1569309880", + "is_public": True, "is_ready_to_show": True, "is_scheduled": False, "show_download": True,
Mailu__Mailu-840
Document the new setup utility\nTitle says it all
[ { "content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py" } ]
[ { "content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py" } ]
diff --git a/docs/compose/.env b/docs/compose/.env index cf906b580..218b94d23 100644 --- a/docs/compose/.env +++ b/docs/compose/.env @@ -1,3 +1,5 @@ +# WARNING: this file is being deprecated over the new setup utility, found at https://setup.mailu.io + # Mailu main configuration file ## Most configuration variables can be modified through the Web interface, # these few settings must however be configured before starting the mail diff --git a/docs/compose/docker-compose.yml b/docs/compose/docker-compose.yml index 2cff9608e..2686ee279 100644 --- a/docs/compose/docker-compose.yml +++ b/docs/compose/docker-compose.yml @@ -1,3 +1,5 @@ +# WARNING: this file is being deprecated over the new setup utility, found at https://setup.mailu.io + version: '2' services: diff --git a/docs/compose/setup.rst b/docs/compose/setup.rst index 3ff1f6787..c1a620e6a 100644 --- a/docs/compose/setup.rst +++ b/docs/compose/setup.rst @@ -12,34 +12,22 @@ Mailu will store all of its persistent data in a path of your choice mkdir /mailu cd /mailu -Download the initial configuration file ---------------------------------------- +Create the configuration files +------------------------------ -Docker Compose configuration is stored in a file named -:download:`docker-compose.yml`. Additionally, Mailu -relies on a :download:`.env` file for various settings. Download -the proper template files from the git repository. To download the configuration -for the ``VERSION_TAG`` branch, use: +Docker Compose configuration is stored in a file named ``docker-compose.yml``. +Additionally, Mailu relies on a ``mailu.env`` file for various settings. +Both files can be generated by the `mailu setup utility`_. The setup utility +is mostly self-explanatory, with some more additional information in this section. -.. code-block:: bash - - wget https://mailu.io/VERSION_TAG/_downloads/docker-compose.yml - wget https://mailu.io/VERSION_TAG/_downloads/.env - -Important configuration variables ---------------------------------- - -Open the ``.env`` file and review the following variable settings: +.. _`mailu setup utility`: https://setup.mailu.io -- Change ``ROOT`` if you have your setup directory in a different location then ``/mailu``. -- Check ``VERSION`` to reflect the version you picked. (``master`` or ``1.5``). - -Make sure to read the comments in the file and instructions from the :ref:`common_cfg` section. +.. _tls_flavor: TLS certificates ```````````````` -Set the ``TLS_FLAVOR`` to one of the following +Sets the ``TLS_FLAVOR`` to one of the following values: - ``cert`` is the default and requires certificates to be setup manually; @@ -59,7 +47,7 @@ values: Bind address ```````````` -Modify ``BIND_ADDRESS4`` and ``BIND_ADDRESS6`` to match the public IP addresses assigned to your server. For IPv6 you will need the ``<global>`` scope address. +The bind addresses need to match the public IP addresses assigned to your server. For IPv6 you will need the ``<global>`` scope address. You can find those addresses by running the following: @@ -81,56 +69,17 @@ you would simply like the server to listen on all interfaces, use ``0.0.0.0`` an .. _issues: https://github.com/Mailu/Mailu/issues/641 -Enable optional features ------------------------- - -Some of Mailu features are not used by every user and are thus not enabled in a -default configuration. - -A Webmail is a Web interface exposing an email client. Mailu webmails are -bound to the internal IMAP and SMTP server for users to access their mailbox through -the Web. 
By exposing a complex application such as a Webmail, you should be aware of -the security implications caused by such an increase of attack surface. The ``WEBMAIL`` -configuration option must be one of the following: - -- ``none`` is the default value, no Webmail service will be exposed; -- ``roundcube`` will run the popular Roundcube Webmail; -- ``rainloop`` will run the popular Rainloop Webmail. - -The administration interface is not exposed on the public address by default, -you will need to set the ``ADMIN`` variable accordingly: - -- ``true`` will expose the admin interface in ``/admin``; -- ``false`` (or any other value) will disable this behaviour. - -A Webdav server exposes a Dav interface over HTTP so that clients can store -contacts or calendars using the mail account. This can be enabled using the `WEBDAV` -setting. The configuration option must be one of the following: - -- ``none`` is the default value, no webdav service will be exposed; -- ``radicale`` exposes the radicale Webdav service. - -An antivirus server helps fighting large scale virus spreading campaigns -that leverage e-mail for initial infection. This can be setup using the ``ANTIVIRUS`` -setting. The configuration option must be one of the following: - -- ``none`` disables antivirus checks; -- ``clamav`` is the default values, the popular ClamAV antivirus is enabled. - -Make sure that you have at least 1GB of memory for ClamAV to load its signature -database. +Review configuration variables +------------------------------ -If you run Mailu behind a reverse proxy you can use ``REAL_IP_HEADER`` and -``REAL_IP_FROM`` to set the values of respective the Nginx directives -``real_ip_header`` and ``set_real_ip_from``. The ``REAL_IP_FROM`` configuration -option is a comma-separated list of IPs (or CIDRs) of which for each a -``set_real_ip_from`` directive is added in the Nginx configuration file. +After downloading the files, open ``mailu.env`` and review the variable settings. +Make sure to read the comments in the file and instructions from the :ref:`common_cfg` page. Finish setting up TLS --------------------- Mailu relies heavily on TLS and must have a key pair and a certificate -available, at least for the hostname configured in the ``.env`` file. +available, at least for the hostname configured in the ``mailu.env`` file. If you set ``TLS_FLAVOR`` to ``cert`` or ``mail`` then you must create a ``certs`` directory in your root path and setup a key-certificate pair there: @@ -155,4 +104,4 @@ Finally, you must create the initial admin user account: This will create a user named ``[email protected]`` with password ``password`` and administration privileges. Connect to the Web admin interface and change the password to a strong one. - .. note:: It is vitally important that either a user with the same email as ``POSTMASTER`` in your ``.env`` exists, or you remember to create an alias with this name after you log in. All kinds of strange errors will occur as a result of not doing so! + .. note:: It is vitally important that either a user with the same email as ``POSTMASTER`` in your ``mailu.env`` exists, or you remember to create an alias with this name after you log in. All kinds of strange errors will occur as a result of not doing so! 
diff --git a/docs/configuration.rst b/docs/configuration.rst index ec114c979..e7dfa2af8 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -1,5 +1,9 @@ -Mailu configuration settings -============================ +Configuration reference +======================= + +This page explains the variables found in ``mailu.env``. +In most cases ``mailu.env`` is setup correctly by the setup utility and can be left as-is. +However, some advanced settings or modifications can be done by modifying this file. .. _common_cfg: @@ -37,6 +41,9 @@ The ``AUTH_RATELIMIT`` holds a security setting for fighting attackers that try to guess user passwords. The value is the limit of requests that a single IP address can perform against IMAP, POP and SMTP authentication endpoints. +The ``TLS_FLAVOR`` sets how Mailu handles TLS connections. Setting this value to +``notls`` will cause Mailu not to server any web content! More on :ref:`tls_flavor`. + Mail settings ------------- diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml index b7026564b..9c5d24731 100644 --- a/docs/docker-compose.yml +++ b/docs/docker-compose.yml @@ -1,28 +1,10 @@ -version: '3' +# This file is used to test the mailu/docs website +# Deployment files can be found on github.com/mailu/infra +version: '3' services: - docs_master: - image: mailu/docs:master - networks: - - web - labels: - - traefik.enable=true - - traefik.port=80 - - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/master/ - - docs_15: - image: mailu/docs:1.5 - networks: - - web - labels: - - traefik.enable=true - - traefik.port=80 - - traefik.root.frontend.redirect.regex=.* - - traefik.root.frontend.redirect.replacement=/1.5/ - - traefik.root.frontend.rule=Host:${ADDRESS};PathPrefix:/ - - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/1.5/ - -networks: - web: - external: true + docs: + image: ${DOCKER_ORG:-mailu}/docs:${MAILU_VERSION:-master} + ports: + - 127.0.0.1:8000:80 diff --git a/docs/index.rst b/docs/index.rst index 98825ab6b..0808010c2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,10 +53,10 @@ the version of Mailu that you are running. 
:caption: Setup setup - configuration compose/requirements compose/setup kubernetes/mailu/index + configuration dns reverse database diff --git a/setup/docker-compose.yml b/setup/docker-compose.yml index 6d14153af..9c93fd6fe 100644 --- a/setup/docker-compose.yml +++ b/setup/docker-compose.yml @@ -1,50 +1,16 @@ -# This file is used to run the mailu/setup utility +# This file is used to test the mailu/setup utility +# Deployment files can be found on github.com/mailu/infra version: '3.6' services: redis: image: redis:alpine - networks: - - default - setup_master: - image: mailu/setup:master - networks: - - web - - default + setup: + image: ${DOCKER_ORG:-mailu}/setup:${MAILU_VERSION:-master} env_file: .env - environment: - this_version: "master" - labels: - - traefik.enable=true - - traefik.port=80 - - traefik.docker.network=web - - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/master/ depends_on: - redis - - setup_release: - image: mailu/setup:${RELEASE} - networks: - - web - - default - env_file: .env - environment: - this_version: ${RELEASE} - labels: - - traefik.enable=true - - traefik.port=80 - - traefik.docker.network=web - - traefik.root.frontend.redirect.regex=.* - - traefik.root.frontend.redirect.replacement=/${RELEASE}/ - - traefik.root.frontend.rule=Host:${ADDRESS};PathPrefix:/ - - traefik.main.frontend.rule=Host:${ADDRESS};PathPrefix:/${RELEASE}/ - depends_on: - - redis - -networks: - web: - external: true - default: - external: false + ports: + - 127.0.0.1:8001:80 diff --git a/setup/server.py b/setup/server.py index fea27ead1..556d4b3a4 100644 --- a/setup/server.py +++ b/setup/server.py @@ -11,7 +11,7 @@ import hashlib -version = os.getenv("this_version") +version = os.getenv("this_version", "master") static_url_path = "/" + version + "/static" app = flask.Flask(__name__, static_url_path=static_url_path) flask_bootstrap.Bootstrap(app) diff --git a/setup/templates/steps/compose/02_services.html b/setup/templates/steps/compose/02_services.html index 11e7a14e0..a78a3f620 100644 --- a/setup/templates/steps/compose/02_services.html +++ b/setup/templates/steps/compose/02_services.html @@ -1,13 +1,13 @@ {% call macros.panel("info", "Step 3 - pick some features") %} <p>Mailu comes with multiple base features, including a specific admin -interface, Web email clients (webmails), antispam, antivirus, etc. If you -wish to disable some of these features, you are free to do so.</p> - -<p>Emails will be available through IMAP and POP3. You may also enable a Web -email client. These do add some complexity but provide an easier way of -accessing messages for beginner users.</p> +interface, Web email clients, antispam, antivirus, etc. +In this section you can enable the services to you liking.</p> <!-- Switched from radio buttons to dropdown menu in order to remove the checkbox --> +<p>A Webmail is a Web interface exposing an email client. Mailu webmails are +bound to the internal IMAP and SMTP server for users to access their mailbox through +the Web. By exposing a complex application such as a Webmail, you should be aware of +the security implications caused by such an increase of attack surface.<p> <div class="form-group"> <label>Enable Web email client (and path to the Web email client)</label> <!-- <div class="radio"> --> @@ -26,10 +26,9 @@ </div> </div> -<p>Email filtering is a really important features. 
You can still disable it, which -will prevent Mailu from doing spam filtering, virus filtering, and from applying -white and blacklists that you may configure in the admin interface. You may -also disable the antivirus if required (it does use aroung 1GB of ram).</p> +<p>An antivirus server helps fighting large scale virus spreading campaigns that leverage +e-mail for initial infection. Make sure that you have at least 1GB of memory for ClamAV to +load its signature database.</p> <div class="form-check form-check-inline"> <label class="form-check-label"> @@ -38,6 +37,9 @@ </label> </div> +<p>A Webdav server exposes a Dav interface over HTTP so that clients can store +contacts or calendars using the mail account.</p> + <div class="form-check form-check-inline"> <label class="form-check-label"> <input class="form-check-input" type="checkbox" name="webdav_enabled" value="radicale"> @@ -45,6 +47,8 @@ </label> </div> +<p>Fetchmail allows to download mails over IMAP/POP3 and uploads it your Mailu mailbox.</p> + <div class="form-check form-check-inline"> <label class="form-check-label"> <input class="form-check-input" type="checkbox" name="fetchmail_enabled" value="true"> diff --git a/setup/templates/steps/compose/03_expose.html b/setup/templates/steps/compose/03_expose.html index 0c9127784..837b7bba2 100644 --- a/setup/templates/steps/compose/03_expose.html +++ b/setup/templates/steps/compose/03_expose.html @@ -10,7 +10,8 @@ an IPv4 or an IPv6 address if you wish to access Mailu.</p> <p><span class="label label-warning">Warning</span> You must use specific addresses, please -avoid generic all-interfaces addresses like <code>0.0.0.0</code> or <code>::</code>.</p> +avoid generic all-interfaces addresses like <code>0.0.0.0</code> or <code>::</code>. +<a href="https://mailu.io/{{ version }}/compose/setup.html#bind-address">How to find these addresses.</a></p> <div class="form-group"> <label>IPv4 listen address</label> @@ -26,13 +27,14 @@ pattern="^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$"> </div> +<p>The unbound resolver enables Mailu to do DNSsec verification, DNS root lookups and caching. This also helps the antispam service not to get blocked by the public or ISP DNS servers.</p> <div class="form-check form-check-inline"> <label class="form-check-label"> <input class="form-check-input" type="checkbox" name="resolver_enabled" value="true"> Enable unbound resolver </label> </div> - +<br><br> <div class="form-group"> <label>Subnet of the docker network. 
This should not conflict with any networks to which your system is connected. (Internal and external!)</label> <input class="form-control" type="text" name="subnet" required pattern="^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))$" diff --git a/setup/templates/steps/config.html b/setup/templates/steps/config.html index d843d684e..330e008f3 100644 --- a/setup/templates/steps/config.html +++ b/setup/templates/steps/config.html @@ -1,8 +1,17 @@ +{% if flavor == "stack" %} +{% call macros.panel("danger", "Docker stack / swarm is experimental") %} +Setup is capable of generating a somewhat decent docker-compose.yml, +for the docker stack flavor. However its usage is for advanced users an is experimental. +Expect many challenges is shared mail storage and fail-over scenarios! Some user experiences +have been <a href="https://github.com/Mailu/Mailu/blob/master/docs/swarm/master/README.md">shared on GitHub.</a> +{% endcall %} +{% endif %} + {% call macros.panel("info", "Step 2 - Initial configuration") %} <p>Before starting some variables must be set</p> <div class="form-group"> - <label>Root path: </label> + <label>Mailu storage path: </label> <!-- Validates path --> <input class="form-control" type="text" name="root" value="/mailu" required pattern="^/[-_A-Za-z0-9]+(/[-_A-Za-z0-9]*)*"> </div> @@ -27,7 +36,7 @@ </div> <div class="form-group"> - <label>Choose how you wish to handle security (TLS) certificates</label> + <label>Choose how you wish to handle security <a href="https://mailu.io/{{ version }}/compose/setup.html#tls-certificates">TLS certificates</a></label> <br/> <select class="btn btn-primary dropdown-toggle" name="tls_flavor"> {% for tlsflavor in ["letsencrypt", "cert", "notls", "mail", "mail-letsencrypt"] %} @@ -61,7 +70,7 @@ <label>Linked Website URL</label> <!-- Validates url with or without https:// --> <input class="form-control" type="url" name="website" value="https://mailu.io" required - pattern="^(https?://)?([a-zA-Z0-9]([a-zA-ZäöüÄÖÜ0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$"> + pattern="^(https?://)?([a-zA-Z0-9]([a-zA-ZäöüÄÖÜ0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$"> </div> <p>The admin interface is the main Mailu-specific bit, it provides tools to diff --git a/setup/templates/steps/flavor.html b/setup/templates/steps/flavor.html index 64aa0158b..a77b0215b 100644 --- a/setup/templates/steps/flavor.html +++ b/setup/templates/steps/flavor.html @@ -9,8 +9,5 @@ <div class="radio"> {{ macros.radio("flavor", "compose", "Compose", "simply using Docker Compose manager", flavor) }} {{ macros.radio("flavor", "stack", "Stack", "using stack deployments in a Swarm cluster", flavor) }} - {{ macros.radio("flavor", "rancher", "Rancher", "on top of the Rancher container manager", flavor) }} - {{ macros.radio("flavor", "kubernetes", "Kubernetes", "on top of the Kubernetes container manager", flavor) }} </div> - {% endcall %} diff --git a/setup/templates/steps/stack/02_services.html b/setup/templates/steps/stack/02_services.html index 36493e05d..4f50bb400 100644 --- a/setup/templates/steps/stack/02_services.html +++ b/setup/templates/steps/stack/02_services.html @@ -1,19 +1,15 @@ {% call macros.panel("info", "Step 3 - pick some features") %} <p>Mailu comes with multiple base features, including a specific admin -interface, Web email clients (webmails), antispam, antivirus, etc. If you -wish to disable some of these features, you are free to do so.</p> - -<p>Emails will be available through IMAP and POP3. You may also enable a Web -email client. 
These do add some complexity but provide an easier way of -accessing messages for beginner users.</p> +interface, Web email clients, antispam, antivirus, etc. +In this section you can enable the services to you liking.</p> <!-- Switched from radio buttons to dropdown menu in order to remove the checkbox --> +<p>A Webmail is a Web interface exposing an email client. Mailu webmails are +bound to the internal IMAP and SMTP server for users to access their mailbox through +the Web. By exposing a complex application such as a Webmail, you should be aware of +the security implications caused by such an increase of attack surface.<p> <div class="form-group"> <label>Enable Web email client (and path to the Web email client)</label> -<!-- <div class="radio"> --> -<!-- {{ macros.radio("webmail_type", "roundcube", "RoundCube", "popular Webmail running on top of PHP") }} --> -<!-- {{ macros.radio("webmail_type", "rainloop", "Rainloop", "lightweight Webmail based on PHP, no database") }} --> -<!-- </div> --> <br/> <select class="btn btn-primary dropdown-toggle" name="webmail_type" id="webmail"> {% for webmailtype in ["none", "roundcube", "rainloop"] %} @@ -26,10 +22,9 @@ </div> </div> -<p>Email filtering is a really important features. You can still disable it, which -will prevent Mailu from doing spam filtering, virus filtering, and from applying -white and blacklists that you may configure in the admin interface. You may -also disable the antivirus if required (it does use aroung 1GB of ram).</p> +<p>An antivirus server helps fighting large scale virus spreading campaigns that leverage +e-mail for initial infection. Make sure that you have at least 1GB of memory for ClamAV to +load its signature database.</p> <div class="form-check form-check-inline"> <label class="form-check-label"> @@ -38,6 +33,9 @@ </label> </div> +<p>A Webdav server exposes a Dav interface over HTTP so that clients can store +contacts or calendars using the mail account.</p> + <div class="form-check form-check-inline"> <label class="form-check-label"> <input class="form-check-input" type="checkbox" name="webdav_enabled" value="radicale"> @@ -45,6 +43,8 @@ </label> </div> +<p>Fetchmail allows to download mails over IMAP/POP3 and uploads it your Mailu mailbox.</p> + <div class="form-check form-check-inline"> <label class="form-check-label"> <input class="form-check-input" type="checkbox" name="fetchmail_enabled" value="true">
ansible__ansible-25551
wait_for missing import for os: "global name 'os' is not defined"
##### ISSUE TYPE
- Bug Report

##### COMPONENT NAME
wait_for

##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.4.0 (devel 416d9774ce) last updated 2017/06/09 10:50:57 (GMT -400)
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /root/tmp/ansible/lib/ansible
  executable location = /root/tmp/ansible/bin/ansible
  python version = 2.7.5 (default, Aug 2 2016, 04:20:16) [GCC 4.8.5 20150623 (Red Hat 4.8.5-4)]
```

##### CONFIGURATION
<!--- Mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). -->

##### OS / ENVIRONMENT
N/A

##### SUMMARY
When running the wait_for task to check for the existence of a file that is present, ansible reports a module failure due to `global name 'os' is not defined`.

##### STEPS TO REPRODUCE
```
cat << EOF > inventory
[local]
localhost ansible_connection=local
EOF

cat << EOF > playbook.yml
---
- hosts: local
  tasks:
    - wait_for:
        path: '/tmp/test_file'
        timeout: 1
EOF

touch /tmp/test_file

ansible-playbook -i inventory playbook.yml
```
<!--- You can also paste gist.github.com links for larger files -->

##### EXPECTED RESULTS
`wait_for` successfully confirms the file exists.

##### ACTUAL RESULTS
```
Traceback (most recent call last):
  File "/tmp/ansible_SksI7U/ansible_module_wait_for.py", line 606, in <module>
    main()
  File "/tmp/ansible_SksI7U/ansible_module_wait_for.py", line 505, in main
    os.stat(path)
NameError: global name 'os' is not defined

fatal: [localhost]: FAILED! => {
    "changed": false,
    "failed": true,
    "module_stderr": "Traceback (most recent call last):\n  File \"/tmp/ansible_SksI7U/ansible_module_wait_for.py\", line 606, in <module>\n    main()\n  File \"/tmp/ansible_SksI7U/ansible_module_wait_for.py\", line 505, in main\n    os.stat(path)\nNameError: global name 'os' is not defined\n",
    "module_stdout": "",
    "msg": "MODULE FAILURE",
    "rc": 0
}
```
Full output: https://gist.github.com/jladdjr/bd5506b56d3bb66d975de278eb61c207#file-gistfile1-txt-L114
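For context on the failing code path: the traceback points at a polling loop that calls `os.stat()` and treats "file not found" as "keep waiting". Below is a minimal, self-contained sketch of that pattern in plain Python 3 — an illustration only, not Ansible's wait_for implementation (which also handles search_regex, ports, and its own exception helpers); `wait_for_file` is a made-up name.

```python
import errno
import os
import time


def wait_for_file(path, timeout=300, sleep=1):
    """Poll until `path` exists or `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            os.stat(path)               # requires `import os` at module level
            return True                 # file exists -> condition met
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise                   # anything but "No such file" is a real failure
        time.sleep(sleep)               # not there yet, wait and retry
    return False
```

If the `import os` (or any other name the loop relies on) is missing at module level, the first call into this branch raises the `NameError` shown in the traceback above.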
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jeroen Hoekx <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['stableinterface'],\n 'supported_by': 'core'}\n\nDOCUMENTATION = r'''\n---\nmodule: wait_for\nshort_description: Waits for a condition before continuing\ndescription:\n - You can wait for a set amount of time C(timeout), this is the default if nothing is specified.\n - Waiting for a port to become available is useful for when services\n are not immediately available after their init scripts return\n which is true of certain Java application servers. It is also\n useful when starting guests with the M(virt) module and\n needing to pause until they are ready.\n - This module can also be used to wait for a regex match a string to be present in a file.\n - In 1.6 and later, this module can also be used to wait for a file to be available or\n absent on the filesystem.\n - In 1.8 and later, this module can also be used to wait for active\n connections to be closed before continuing, useful if a node\n is being rotated out of a load balancer pool.\nversion_added: \"0.7\"\noptions:\n host:\n description:\n - A resolvable hostname or IP address to wait for.\n default: \"127.0.0.1\"\n timeout:\n description:\n - Maximum number of seconds to wait for.\n default: 300\n connect_timeout:\n description:\n - Maximum number of seconds to wait for a connection to happen before closing and retrying.\n default: 5\n delay:\n description:\n - Number of seconds to wait before starting to poll.\n default: 0\n port:\n description:\n - Port number to poll.\n active_connection_states:\n description:\n - The list of TCP connection states which are counted as active connections.\n default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]\n version_added: \"2.3\"\n state:\n description:\n - Either C(present), C(started), or C(stopped), C(absent), or C(drained).\n - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections.\n - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,\n C(absent) will check that file is absent or removed.\n choices: [ absent, drained, present, started, stopped ]\n default: started\n path:\n version_added: \"1.4\"\n description:\n - Path to a file on the filesytem that must exist before continuing.\n search_regex:\n version_added: \"1.4\"\n description:\n - Can be used to match a string in either a file or a socket connection.\n - Defaults to a multiline regex.\n exclude_hosts:\n version_added: \"1.8\"\n description:\n - List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.\n sleep:\n version_added: \"2.3\"\n default: 1\n description:\n - Number 
of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.\n msg:\n version_added: \"2.4\"\n required: false\n default: null\n description:\n - This overrides the normal error message from a failure to meet the required conditions.\nnotes:\n - The ability to use search_regex with a port connection was added in 1.7.\nauthor:\n - Jeroen Hoekx (@jhoekx)\n - John Jarvis (@jarv)\n - Andrii Radyk (@AnderEnder)\n'''\n\nEXAMPLES = r'''\n\n- name: Wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds\n wait_for:\n port: 8000\n delay: 10\n\n- name: Wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds\n wait_for:\n host: 0.0.0.0\n port: 8000\n delay: 10\n state: drained\n\n- name: Wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts\n wait_for:\n host: 0.0.0.0\n port: 8000\n state: drained\n exclude_hosts: 10.2.1.2,10.2.1.3\n\n- name: Wait until the file /tmp/foo is present before continuing\n wait_for:\n path: /tmp/foo\n\n- name: Wait until the string \"completed\" is in the file /tmp/foo before continuing\n wait_for:\n path: /tmp/foo\n search_regex: completed\n\n- name: Wait until the lock file is removed\n wait_for:\n path: /var/lock/file.lock\n state: absent\n\n- name: Wait until the process is finished and pid was destroyed\n wait_for:\n path: /proc/3466/status\n state: absent\n\n- name: Output customized message when failed\n wait_for:\n path: /tmp/foo\n state: present\n msg: Timeout to find file /tmp/foo\n\n# Don't assume the inventory_hostname is resolvable and delay 10 seconds at start\n- name: Wait 300 seconds for port 22 to become open and contain \"OpenSSH\"\n wait_for:\n port: 22\n host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'\n search_regex: OpenSSH\n delay: 10\n connection: local\n\n# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'\n- name: Wait 300 seconds for port 22 to become open and contain \"OpenSSH\"\n wait_for:\n port: 22\n host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'\n search_regex: OpenSSH\n delay: 10\n vars:\n ansible_connection: local\n'''\n\nimport binascii\nimport datetime\nimport math\nimport os\nimport re\nimport select\nimport socket\nimport sys\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule, load_platform_subclass\nfrom ansible.module_utils._text import to_native\n\n\nHAS_PSUTIL = False\ntry:\n import psutil\n HAS_PSUTIL = True\n # just because we can import it on Linux doesn't mean we will use it\nexcept ImportError:\n pass\n\n\nclass TCPConnectionInfo(object):\n \"\"\"\n This is a generic TCP Connection Info strategy class that relies\n on the psutil module, which is not ideal for targets, but necessary\n for cross platform support.\n\n A subclass may wish to override some or all of these methods.\n - _get_exclude_ips()\n - get_active_connections()\n\n All subclasses MUST define platform and distribution (which may be None).\n \"\"\"\n platform = 'Generic'\n distribution = None\n\n match_all_ips = {\n socket.AF_INET: '0.0.0.0',\n socket.AF_INET6: '::',\n }\n ipv4_mapped_ipv6_address = {\n 'prefix': '::ffff',\n 'match_all': '::ffff:0.0.0.0'\n }\n\n def __new__(cls, *args, **kwargs):\n return load_platform_subclass(TCPConnectionInfo, args, kwargs)\n\n def __init__(self, module):\n self.module = module\n self.ips = 
_convert_host_to_ip(module.params['host'])\n self.port = int(self.module.params['port'])\n self.exclude_ips = self._get_exclude_ips()\n if not HAS_PSUTIL:\n module.fail_json(msg=\"psutil module required for wait_for\")\n\n def _get_exclude_ips(self):\n exclude_hosts = self.module.params['exclude_hosts']\n exclude_ips = []\n if exclude_hosts is not None:\n for host in exclude_hosts:\n exclude_ips.extend(_convert_host_to_ip(host))\n return exclude_ips\n\n def get_active_connections_count(self):\n active_connections = 0\n for p in psutil.process_iter():\n connections = p.get_connections(kind='inet')\n for conn in connections:\n if conn.status not in self.module.params['active_connection_states']:\n continue\n (local_ip, local_port) = conn.local_address\n if self.port != local_port:\n continue\n (remote_ip, remote_port) = conn.remote_address\n if (conn.family, remote_ip) in self.exclude_ips:\n continue\n if any((\n (conn.family, local_ip) in self.ips,\n (conn.family, self.match_all_ips[conn.family]) in self.ips,\n local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and\n (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,\n )):\n active_connections += 1\n return active_connections\n\n\n# ===========================================\n# Subclass: Linux\n\nclass LinuxTCPConnectionInfo(TCPConnectionInfo):\n \"\"\"\n This is a TCP Connection Info evaluation strategy class\n that utilizes information from Linux's procfs. While less universal,\n does allow Linux targets to not require an additional library.\n \"\"\"\n platform = 'Linux'\n distribution = None\n\n source_file = {\n socket.AF_INET: '/proc/net/tcp',\n socket.AF_INET6: '/proc/net/tcp6'\n }\n match_all_ips = {\n socket.AF_INET: '00000000',\n socket.AF_INET6: '00000000000000000000000000000000',\n }\n ipv4_mapped_ipv6_address = {\n 'prefix': '0000000000000000FFFF0000',\n 'match_all': '0000000000000000FFFF000000000000'\n }\n local_address_field = 1\n remote_address_field = 2\n connection_state_field = 3\n\n def __init__(self, module):\n self.module = module\n self.ips = _convert_host_to_hex(module.params['host'])\n self.port = \"%0.4X\" % int(module.params['port'])\n self.exclude_ips = self._get_exclude_ips()\n\n def _get_exclude_ips(self):\n exclude_hosts = self.module.params['exclude_hosts']\n exclude_ips = []\n if exclude_hosts is not None:\n for host in exclude_hosts:\n exclude_ips.extend(_convert_host_to_hex(host))\n return exclude_ips\n\n def get_active_connections_count(self):\n active_connections = 0\n for family in self.source_file.keys():\n f = open(self.source_file[family])\n for tcp_connection in f.readlines():\n tcp_connection = tcp_connection.strip().split()\n if tcp_connection[self.local_address_field] == 'local_address':\n continue\n if (tcp_connection[self.connection_state_field] not in\n [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):\n continue\n (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')\n if self.port != local_port:\n continue\n (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')\n if (family, remote_ip) in self.exclude_ips:\n continue\n if any((\n (family, local_ip) in self.ips,\n (family, self.match_all_ips[family]) in self.ips,\n local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and\n (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,\n )):\n active_connections += 1\n f.close()\n return active_connections\n\n\ndef 
_convert_host_to_ip(host):\n \"\"\"\n Perform forward DNS resolution on host, IP will give the same IP\n\n Args:\n host: String with either hostname, IPv4, or IPv6 address\n\n Returns:\n List of tuples containing address family and IP\n \"\"\"\n addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)\n ips = []\n for family, socktype, proto, canonname, sockaddr in addrinfo:\n ip = sockaddr[0]\n ips.append((family, ip))\n if family == socket.AF_INET:\n ips.append((socket.AF_INET6, \"::ffff:\" + ip))\n return ips\n\n\ndef _convert_host_to_hex(host):\n \"\"\"\n Convert the provided host to the format in /proc/net/tcp*\n\n /proc/net/tcp uses little-endian four byte hex for ipv4\n /proc/net/tcp6 uses little-endian per 4B word for ipv6\n\n Args:\n host: String with either hostname, IPv4, or IPv6 address\n\n Returns:\n List of tuples containing address family and the\n little-endian converted host\n \"\"\"\n ips = []\n if host is not None:\n for family, ip in _convert_host_to_ip(host):\n hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))\n hexip_hf = \"\"\n for i in range(0, len(hexip_nf), 8):\n ipgroup_nf = hexip_nf[i:i + 8]\n ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))\n hexip_hf = \"%s%08X\" % (hexip_hf, ipgroup_hf)\n ips.append((family, hexip_hf))\n return ips\n\n\ndef _create_connection(host, port, connect_timeout):\n \"\"\"\n Connect to a 2-tuple (host, port) and return\n the socket object.\n\n Args:\n 2-tuple (host, port) and connection timeout\n Returns:\n Socket object\n \"\"\"\n if sys.version_info < (2, 6):\n (family, _) = (_convert_host_to_ip(host))[0]\n connect_socket = socket.socket(family, socket.SOCK_STREAM)\n connect_socket.settimeout(connect_timeout)\n connect_socket.connect((host, port))\n else:\n connect_socket = socket.create_connection((host, port), connect_timeout)\n return connect_socket\n\n\ndef _timedelta_total_seconds(timedelta):\n return (\n timedelta.microseconds + 0.0 +\n (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6\n\n\ndef get_connection_state_id(state):\n connection_state_id = {\n 'ESTABLISHED': '01',\n 'SYN_SENT': '02',\n 'SYN_RECV': '03',\n 'FIN_WAIT1': '04',\n 'FIN_WAIT2': '05',\n 'TIME_WAIT': '06',\n }\n return connection_state_id[state]\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(type='str', default='127.0.0.1'),\n timeout=dict(type='int', default=300),\n connect_timeout=dict(type='int', default=5),\n delay=dict(type='int', default=0),\n port=dict(type='int'),\n active_connection_states=dict(type='list', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),\n path=dict(type='path'),\n search_regex=dict(type='str'),\n state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),\n exclude_hosts=dict(type='list'),\n sleep=dict(type='int', default=1),\n msg=dict(type='str'),\n ),\n )\n\n host = module.params['host']\n timeout = module.params['timeout']\n connect_timeout = module.params['connect_timeout']\n delay = module.params['delay']\n port = module.params['port']\n state = module.params['state']\n path = module.params['path']\n search_regex = module.params['search_regex']\n msg = module.params['msg']\n\n if search_regex is not None:\n compiled_search_re = re.compile(search_regex, re.MULTILINE)\n else:\n compiled_search_re = None\n\n if port and path:\n module.fail_json(msg=\"port and path parameter can not both be passed to wait_for\")\n if path and state == 'stopped':\n 
module.fail_json(msg=\"state=stopped should only be used for checking a port in the wait_for module\")\n if path and state == 'drained':\n module.fail_json(msg=\"state=drained should only be used for checking a port in the wait_for module\")\n if module.params['exclude_hosts'] is not None and state != 'drained':\n module.fail_json(msg=\"exclude_hosts should only be with state=drained\")\n for _connection_state in module.params['active_connection_states']:\n try:\n get_connection_state_id(_connection_state)\n except:\n module.fail_json(msg=\"unknown active_connection_state (%s) defined\" % _connection_state)\n\n start = datetime.datetime.utcnow()\n\n if delay:\n time.sleep(delay)\n\n if not port and not path and state != 'drained':\n time.sleep(timeout)\n elif state in ['absent', 'stopped']:\n # first wait for the stop condition\n end = start + datetime.timedelta(seconds=timeout)\n\n while datetime.datetime.utcnow() < end:\n if path:\n try:\n f = open(path)\n f.close()\n except IOError:\n break\n elif port:\n try:\n s = _create_connection(host, port, connect_timeout)\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n except:\n break\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n else:\n elapsed = datetime.datetime.utcnow() - start\n if port:\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s to stop.\" % (host, port), elapsed=elapsed.seconds)\n elif path:\n module.fail_json(msg=msg or \"Timeout when waiting for %s to be absent.\" % (path), elapsed=elapsed.seconds)\n\n elif state in ['started', 'present']:\n # wait for start condition\n end = start + datetime.timedelta(seconds=timeout)\n while datetime.datetime.utcnow() < end:\n if path:\n try:\n os.stat(path)\n except OSError:\n e = get_exception()\n # If anything except file not present, throw an error\n if e.errno != 2:\n elapsed = datetime.datetime.utcnow() - start\n module.fail_json(msg=msg or \"Failed to stat %s, %s\" % (path, e.strerror), elapsed=elapsed.seconds)\n # file doesn't exist yet, so continue\n else:\n # File exists. Are there additional things to check?\n if not compiled_search_re:\n # nope, succeed!\n break\n try:\n f = open(path)\n try:\n if re.search(compiled_search_re, f.read()):\n # String found, success!\n break\n finally:\n f.close()\n except IOError:\n pass\n elif port:\n alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))\n try:\n s = _create_connection(host, port, min(connect_timeout, alt_connect_timeout))\n except:\n # Failed to connect by connect_timeout. wait and try again\n pass\n else:\n # Connected -- are there additional conditions?\n if compiled_search_re:\n data = ''\n matched = False\n while datetime.datetime.utcnow() < end:\n max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))\n (readable, w, e) = select.select([s], [], [], max_timeout)\n if not readable:\n # No new data. 
Probably means our timeout\n # expired\n continue\n response = s.recv(1024)\n if not response:\n # Server shutdown\n break\n data += to_native(response, errors='surrogate_or_strict')\n if re.search(compiled_search_re, data):\n matched = True\n break\n\n # Shutdown the client socket\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n if matched:\n # Found our string, success!\n break\n else:\n # Connection established, success!\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n break\n\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n\n else: # while-else\n # Timeout expired\n elapsed = datetime.datetime.utcnow() - start\n if port:\n if search_regex:\n module.fail_json(msg=msg or \"Timeout when waiting for search string %s in %s:%s\" % (search_regex, host, port), elapsed=elapsed.seconds)\n else:\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s\" % (host, port), elapsed=elapsed.seconds)\n elif path:\n if search_regex:\n module.fail_json(msg=msg or \"Timeout when waiting for search string %s in %s\" % (search_regex, path), elapsed=elapsed.seconds)\n else:\n module.fail_json(msg=msg or \"Timeout when waiting for file %s\" % (path), elapsed=elapsed.seconds)\n\n elif state == 'drained':\n # wait until all active connections are gone\n end = start + datetime.timedelta(seconds=timeout)\n tcpconns = TCPConnectionInfo(module)\n while datetime.datetime.utcnow() < end:\n try:\n if tcpconns.get_active_connections_count() == 0:\n break\n except IOError:\n pass\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n else:\n elapsed = datetime.datetime.utcnow() - start\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s to drain\" % (host, port), elapsed=elapsed.seconds)\n\n elapsed = datetime.datetime.utcnow() - start\n module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/utilities/logic/wait_for.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jeroen Hoekx <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['stableinterface'],\n 'supported_by': 'core'}\n\nDOCUMENTATION = r'''\n---\nmodule: wait_for\nshort_description: Waits for a condition before continuing\ndescription:\n - You can wait for a set amount of time C(timeout), this is the default if nothing is specified.\n - Waiting for a port to become available is useful for when services\n are not immediately available after their init scripts return\n which is true of certain Java application servers. It is also\n useful when starting guests with the M(virt) module and\n needing to pause until they are ready.\n - This module can also be used to wait for a regex match a string to be present in a file.\n - In 1.6 and later, this module can also be used to wait for a file to be available or\n absent on the filesystem.\n - In 1.8 and later, this module can also be used to wait for active\n connections to be closed before continuing, useful if a node\n is being rotated out of a load balancer pool.\nversion_added: \"0.7\"\noptions:\n host:\n description:\n - A resolvable hostname or IP address to wait for.\n default: \"127.0.0.1\"\n timeout:\n description:\n - Maximum number of seconds to wait for.\n default: 300\n connect_timeout:\n description:\n - Maximum number of seconds to wait for a connection to happen before closing and retrying.\n default: 5\n delay:\n description:\n - Number of seconds to wait before starting to poll.\n default: 0\n port:\n description:\n - Port number to poll.\n active_connection_states:\n description:\n - The list of TCP connection states which are counted as active connections.\n default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]\n version_added: \"2.3\"\n state:\n description:\n - Either C(present), C(started), or C(stopped), C(absent), or C(drained).\n - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections.\n - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,\n C(absent) will check that file is absent or removed.\n choices: [ absent, drained, present, started, stopped ]\n default: started\n path:\n version_added: \"1.4\"\n description:\n - Path to a file on the filesytem that must exist before continuing.\n search_regex:\n version_added: \"1.4\"\n description:\n - Can be used to match a string in either a file or a socket connection.\n - Defaults to a multiline regex.\n exclude_hosts:\n version_added: \"1.8\"\n description:\n - List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.\n sleep:\n version_added: \"2.3\"\n default: 1\n description:\n - Number 
of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.\n msg:\n version_added: \"2.4\"\n required: false\n default: null\n description:\n - This overrides the normal error message from a failure to meet the required conditions.\nnotes:\n - The ability to use search_regex with a port connection was added in 1.7.\nauthor:\n - Jeroen Hoekx (@jhoekx)\n - John Jarvis (@jarv)\n - Andrii Radyk (@AnderEnder)\n'''\n\nEXAMPLES = r'''\n\n- name: Wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds\n wait_for:\n port: 8000\n delay: 10\n\n- name: Wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds\n wait_for:\n host: 0.0.0.0\n port: 8000\n delay: 10\n state: drained\n\n- name: Wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts\n wait_for:\n host: 0.0.0.0\n port: 8000\n state: drained\n exclude_hosts: 10.2.1.2,10.2.1.3\n\n- name: Wait until the file /tmp/foo is present before continuing\n wait_for:\n path: /tmp/foo\n\n- name: Wait until the string \"completed\" is in the file /tmp/foo before continuing\n wait_for:\n path: /tmp/foo\n search_regex: completed\n\n- name: Wait until the lock file is removed\n wait_for:\n path: /var/lock/file.lock\n state: absent\n\n- name: Wait until the process is finished and pid was destroyed\n wait_for:\n path: /proc/3466/status\n state: absent\n\n- name: Output customized message when failed\n wait_for:\n path: /tmp/foo\n state: present\n msg: Timeout to find file /tmp/foo\n\n# Don't assume the inventory_hostname is resolvable and delay 10 seconds at start\n- name: Wait 300 seconds for port 22 to become open and contain \"OpenSSH\"\n wait_for:\n port: 22\n host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'\n search_regex: OpenSSH\n delay: 10\n connection: local\n\n# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'\n- name: Wait 300 seconds for port 22 to become open and contain \"OpenSSH\"\n wait_for:\n port: 22\n host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'\n search_regex: OpenSSH\n delay: 10\n vars:\n ansible_connection: local\n'''\n\nimport binascii\nimport datetime\nimport math\nimport os\nimport re\nimport select\nimport socket\nimport sys\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule, load_platform_subclass\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.pycompat24 import get_exception\n\n\nHAS_PSUTIL = False\ntry:\n import psutil\n HAS_PSUTIL = True\n # just because we can import it on Linux doesn't mean we will use it\nexcept ImportError:\n pass\n\n\nclass TCPConnectionInfo(object):\n \"\"\"\n This is a generic TCP Connection Info strategy class that relies\n on the psutil module, which is not ideal for targets, but necessary\n for cross platform support.\n\n A subclass may wish to override some or all of these methods.\n - _get_exclude_ips()\n - get_active_connections()\n\n All subclasses MUST define platform and distribution (which may be None).\n \"\"\"\n platform = 'Generic'\n distribution = None\n\n match_all_ips = {\n socket.AF_INET: '0.0.0.0',\n socket.AF_INET6: '::',\n }\n ipv4_mapped_ipv6_address = {\n 'prefix': '::ffff',\n 'match_all': '::ffff:0.0.0.0'\n }\n\n def __new__(cls, *args, **kwargs):\n return load_platform_subclass(TCPConnectionInfo, args, kwargs)\n\n def __init__(self, module):\n self.module 
= module\n self.ips = _convert_host_to_ip(module.params['host'])\n self.port = int(self.module.params['port'])\n self.exclude_ips = self._get_exclude_ips()\n if not HAS_PSUTIL:\n module.fail_json(msg=\"psutil module required for wait_for\")\n\n def _get_exclude_ips(self):\n exclude_hosts = self.module.params['exclude_hosts']\n exclude_ips = []\n if exclude_hosts is not None:\n for host in exclude_hosts:\n exclude_ips.extend(_convert_host_to_ip(host))\n return exclude_ips\n\n def get_active_connections_count(self):\n active_connections = 0\n for p in psutil.process_iter():\n connections = p.get_connections(kind='inet')\n for conn in connections:\n if conn.status not in self.module.params['active_connection_states']:\n continue\n (local_ip, local_port) = conn.local_address\n if self.port != local_port:\n continue\n (remote_ip, remote_port) = conn.remote_address\n if (conn.family, remote_ip) in self.exclude_ips:\n continue\n if any((\n (conn.family, local_ip) in self.ips,\n (conn.family, self.match_all_ips[conn.family]) in self.ips,\n local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and\n (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,\n )):\n active_connections += 1\n return active_connections\n\n\n# ===========================================\n# Subclass: Linux\n\nclass LinuxTCPConnectionInfo(TCPConnectionInfo):\n \"\"\"\n This is a TCP Connection Info evaluation strategy class\n that utilizes information from Linux's procfs. While less universal,\n does allow Linux targets to not require an additional library.\n \"\"\"\n platform = 'Linux'\n distribution = None\n\n source_file = {\n socket.AF_INET: '/proc/net/tcp',\n socket.AF_INET6: '/proc/net/tcp6'\n }\n match_all_ips = {\n socket.AF_INET: '00000000',\n socket.AF_INET6: '00000000000000000000000000000000',\n }\n ipv4_mapped_ipv6_address = {\n 'prefix': '0000000000000000FFFF0000',\n 'match_all': '0000000000000000FFFF000000000000'\n }\n local_address_field = 1\n remote_address_field = 2\n connection_state_field = 3\n\n def __init__(self, module):\n self.module = module\n self.ips = _convert_host_to_hex(module.params['host'])\n self.port = \"%0.4X\" % int(module.params['port'])\n self.exclude_ips = self._get_exclude_ips()\n\n def _get_exclude_ips(self):\n exclude_hosts = self.module.params['exclude_hosts']\n exclude_ips = []\n if exclude_hosts is not None:\n for host in exclude_hosts:\n exclude_ips.extend(_convert_host_to_hex(host))\n return exclude_ips\n\n def get_active_connections_count(self):\n active_connections = 0\n for family in self.source_file.keys():\n f = open(self.source_file[family])\n for tcp_connection in f.readlines():\n tcp_connection = tcp_connection.strip().split()\n if tcp_connection[self.local_address_field] == 'local_address':\n continue\n if (tcp_connection[self.connection_state_field] not in\n [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):\n continue\n (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')\n if self.port != local_port:\n continue\n (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')\n if (family, remote_ip) in self.exclude_ips:\n continue\n if any((\n (family, local_ip) in self.ips,\n (family, self.match_all_ips[family]) in self.ips,\n local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and\n (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,\n )):\n active_connections += 1\n f.close()\n return active_connections\n\n\ndef 
_convert_host_to_ip(host):\n \"\"\"\n Perform forward DNS resolution on host, IP will give the same IP\n\n Args:\n host: String with either hostname, IPv4, or IPv6 address\n\n Returns:\n List of tuples containing address family and IP\n \"\"\"\n addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)\n ips = []\n for family, socktype, proto, canonname, sockaddr in addrinfo:\n ip = sockaddr[0]\n ips.append((family, ip))\n if family == socket.AF_INET:\n ips.append((socket.AF_INET6, \"::ffff:\" + ip))\n return ips\n\n\ndef _convert_host_to_hex(host):\n \"\"\"\n Convert the provided host to the format in /proc/net/tcp*\n\n /proc/net/tcp uses little-endian four byte hex for ipv4\n /proc/net/tcp6 uses little-endian per 4B word for ipv6\n\n Args:\n host: String with either hostname, IPv4, or IPv6 address\n\n Returns:\n List of tuples containing address family and the\n little-endian converted host\n \"\"\"\n ips = []\n if host is not None:\n for family, ip in _convert_host_to_ip(host):\n hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))\n hexip_hf = \"\"\n for i in range(0, len(hexip_nf), 8):\n ipgroup_nf = hexip_nf[i:i + 8]\n ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))\n hexip_hf = \"%s%08X\" % (hexip_hf, ipgroup_hf)\n ips.append((family, hexip_hf))\n return ips\n\n\ndef _create_connection(host, port, connect_timeout):\n \"\"\"\n Connect to a 2-tuple (host, port) and return\n the socket object.\n\n Args:\n 2-tuple (host, port) and connection timeout\n Returns:\n Socket object\n \"\"\"\n if sys.version_info < (2, 6):\n (family, _) = (_convert_host_to_ip(host))[0]\n connect_socket = socket.socket(family, socket.SOCK_STREAM)\n connect_socket.settimeout(connect_timeout)\n connect_socket.connect((host, port))\n else:\n connect_socket = socket.create_connection((host, port), connect_timeout)\n return connect_socket\n\n\ndef _timedelta_total_seconds(timedelta):\n return (\n timedelta.microseconds + 0.0 +\n (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6\n\n\ndef get_connection_state_id(state):\n connection_state_id = {\n 'ESTABLISHED': '01',\n 'SYN_SENT': '02',\n 'SYN_RECV': '03',\n 'FIN_WAIT1': '04',\n 'FIN_WAIT2': '05',\n 'TIME_WAIT': '06',\n }\n return connection_state_id[state]\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(type='str', default='127.0.0.1'),\n timeout=dict(type='int', default=300),\n connect_timeout=dict(type='int', default=5),\n delay=dict(type='int', default=0),\n port=dict(type='int'),\n active_connection_states=dict(type='list', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),\n path=dict(type='path'),\n search_regex=dict(type='str'),\n state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),\n exclude_hosts=dict(type='list'),\n sleep=dict(type='int', default=1),\n msg=dict(type='str'),\n ),\n )\n\n host = module.params['host']\n timeout = module.params['timeout']\n connect_timeout = module.params['connect_timeout']\n delay = module.params['delay']\n port = module.params['port']\n state = module.params['state']\n path = module.params['path']\n search_regex = module.params['search_regex']\n msg = module.params['msg']\n\n if search_regex is not None:\n compiled_search_re = re.compile(search_regex, re.MULTILINE)\n else:\n compiled_search_re = None\n\n if port and path:\n module.fail_json(msg=\"port and path parameter can not both be passed to wait_for\")\n if path and state == 'stopped':\n 
module.fail_json(msg=\"state=stopped should only be used for checking a port in the wait_for module\")\n if path and state == 'drained':\n module.fail_json(msg=\"state=drained should only be used for checking a port in the wait_for module\")\n if module.params['exclude_hosts'] is not None and state != 'drained':\n module.fail_json(msg=\"exclude_hosts should only be with state=drained\")\n for _connection_state in module.params['active_connection_states']:\n try:\n get_connection_state_id(_connection_state)\n except:\n module.fail_json(msg=\"unknown active_connection_state (%s) defined\" % _connection_state)\n\n start = datetime.datetime.utcnow()\n\n if delay:\n time.sleep(delay)\n\n if not port and not path and state != 'drained':\n time.sleep(timeout)\n elif state in ['absent', 'stopped']:\n # first wait for the stop condition\n end = start + datetime.timedelta(seconds=timeout)\n\n while datetime.datetime.utcnow() < end:\n if path:\n try:\n f = open(path)\n f.close()\n except IOError:\n break\n elif port:\n try:\n s = _create_connection(host, port, connect_timeout)\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n except:\n break\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n else:\n elapsed = datetime.datetime.utcnow() - start\n if port:\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s to stop.\" % (host, port), elapsed=elapsed.seconds)\n elif path:\n module.fail_json(msg=msg or \"Timeout when waiting for %s to be absent.\" % (path), elapsed=elapsed.seconds)\n\n elif state in ['started', 'present']:\n # wait for start condition\n end = start + datetime.timedelta(seconds=timeout)\n while datetime.datetime.utcnow() < end:\n if path:\n try:\n os.stat(path)\n except OSError:\n e = get_exception()\n # If anything except file not present, throw an error\n if e.errno != 2:\n elapsed = datetime.datetime.utcnow() - start\n module.fail_json(msg=msg or \"Failed to stat %s, %s\" % (path, e.strerror), elapsed=elapsed.seconds)\n # file doesn't exist yet, so continue\n else:\n # File exists. Are there additional things to check?\n if not compiled_search_re:\n # nope, succeed!\n break\n try:\n f = open(path)\n try:\n if re.search(compiled_search_re, f.read()):\n # String found, success!\n break\n finally:\n f.close()\n except IOError:\n pass\n elif port:\n alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))\n try:\n s = _create_connection(host, port, min(connect_timeout, alt_connect_timeout))\n except:\n # Failed to connect by connect_timeout. wait and try again\n pass\n else:\n # Connected -- are there additional conditions?\n if compiled_search_re:\n data = ''\n matched = False\n while datetime.datetime.utcnow() < end:\n max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))\n (readable, w, e) = select.select([s], [], [], max_timeout)\n if not readable:\n # No new data. 
Probably means our timeout\n # expired\n continue\n response = s.recv(1024)\n if not response:\n # Server shutdown\n break\n data += to_native(response, errors='surrogate_or_strict')\n if re.search(compiled_search_re, data):\n matched = True\n break\n\n # Shutdown the client socket\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n if matched:\n # Found our string, success!\n break\n else:\n # Connection established, success!\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n break\n\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n\n else: # while-else\n # Timeout expired\n elapsed = datetime.datetime.utcnow() - start\n if port:\n if search_regex:\n module.fail_json(msg=msg or \"Timeout when waiting for search string %s in %s:%s\" % (search_regex, host, port), elapsed=elapsed.seconds)\n else:\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s\" % (host, port), elapsed=elapsed.seconds)\n elif path:\n if search_regex:\n module.fail_json(msg=msg or \"Timeout when waiting for search string %s in %s\" % (search_regex, path), elapsed=elapsed.seconds)\n else:\n module.fail_json(msg=msg or \"Timeout when waiting for file %s\" % (path), elapsed=elapsed.seconds)\n\n elif state == 'drained':\n # wait until all active connections are gone\n end = start + datetime.timedelta(seconds=timeout)\n tcpconns = TCPConnectionInfo(module)\n while datetime.datetime.utcnow() < end:\n try:\n if tcpconns.get_active_connections_count() == 0:\n break\n except IOError:\n pass\n # Conditions not yet met, wait and try again\n time.sleep(module.params['sleep'])\n else:\n elapsed = datetime.datetime.utcnow() - start\n module.fail_json(msg=msg or \"Timeout when waiting for %s:%s to drain\" % (host, port), elapsed=elapsed.seconds)\n\n elapsed = datetime.datetime.utcnow() - start\n module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/utilities/logic/wait_for.py" } ]
diff --git a/lib/ansible/modules/utilities/logic/wait_for.py b/lib/ansible/modules/utilities/logic/wait_for.py index 9fe7bcbbd416a0..083e672bfe1206 100644 --- a/lib/ansible/modules/utilities/logic/wait_for.py +++ b/lib/ansible/modules/utilities/logic/wait_for.py @@ -183,6 +183,7 @@ from ansible.module_utils.basic import AnsibleModule, load_platform_subclass from ansible.module_utils._text import to_native +from ansible.module_utils.pycompat24 import get_exception HAS_PSUTIL = False
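The patch above only adds the `get_exception` import; the module body shown above already calls it inside its `except` blocks (for example around `os.stat(path)`). Below is a minimal, standalone sketch of that compatibility pattern, assuming an Ansible 2.x environment where `ansible.module_utils.pycompat24` is importable; the helper name `stat_or_none` is made up for illustration.

```python
# Sketch only: retrieve the active exception without `except ... as e`,
# which is how the wait_for module stays compatible with very old Pythons.
import errno
import os

from ansible.module_utils.pycompat24 import get_exception


def stat_or_none(path):
    try:
        return os.stat(path)
    except OSError:
        e = get_exception()          # the exception currently being handled
        if e.errno != errno.ENOENT:  # same intent as the module's `e.errno != 2`
            raise
        return None
```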
ktbyers__netmiko-2037
ssh_autodetect.py fails to detect Dell OS9 devices
I found an issue with the Netmiko ssh_autodetect.py feature on Dell OS9 (or dell_force10) switches, but the same issue might appear with other vendor OSs as well. I'm asking for comments and ideas on the best possible implementation.

The first issue is that ssh_autodetect.py detects only one Dell hardware type, S4048-ON, instead of detecting the running OS. For example, it is also possible to run Dell OS10 on that specific hardware type. It would be better to match on the line 'Networking OS Version : 9.14(0.1)' in the output of the 'show version' command, and that would be simple to fix.

The other, more complex, issue is that SSH_MAPPER_BASE contains the 'show system' command, which is valid for Dell OS9 switches but returns paginated output and therefore breaks the detection. I tested this with Python 3.6, in which dictionaries are insertion ordered. The code loops through the items in SSH_MAPPER_BASE, so the commands are sent in the order 'show system', 'show version', 'show system', 'show version', 'show version', etc., and checked against the corresponding search patterns.

Here's the output of the 'show system' command:

```
Stack MAC              : 00:00:00:00:00:00
Reload-Type            : normal-reload [Next boot : normal-reload]

-- Unit 1 --
Unit Type              : Management Unit
Status                 : online
Next Boot              : online
Required Type          : S3048-ON - 52-port GE/TE (SG-ON)
Current Type           : S3048-ON - 52-port GE/TE (SG-ON)
Master priority        : 0
Hardware Rev           : 0.0
Num Ports              : 52
Up Time                : 22 wk, 1 day, 21 hr, 54 min
Networking OS Version  : 9.14(0.1)
Jumbo Capable          : yes
POE Capable            : no
FIPS Mode              : disabled
Burned In MAC          : 00:00:00:00:00:00
No Of MACs             : 3

-- Power Supplies --
--More--
```

The next command then arrives on the CLI as 'how version', because the leading 's' is swallowed by the '--More--' prompt left over from the previous output:

```
sw1#how version
               ^
% Error: Invalid input at "^" marker.
sw1#
```

I came up with a couple of options for how this could be solved:

1. Use an OrderedDict for SSH_MAPPER_BASE and change the order of the commands.
   Currently the items in SSH_MAPPER_BASE are in alphabetical order by vendor name. The order of items in SSH_MAPPER_BASE (as an ordered dict) could be changed so that the commands are sent to the devices in order of how frequently they appear in SSH_MAPPER_BASE, i.e.
   'show version' -> appears 11 times
   'show system' -> appears 2 times
   rest of the commands -> only once
   This order would be closer to optimal, as most devices can be identified from the output of 'show version'. A rough sketch of this option appears below, after this list.
2. Change the commands to match only the relevant line of the output.
   This would also solve the issue, but more commands would have to be sent to the devices, which is not optimal:
   'show version | i ASA'
   'show version | i Networking OS Version'
   etc.
3. Add support for paginated output.
   I suppose this would be rather complicated, as the OS, and therefore the command needed to handle paging, is unknown at that point.

Any other ideas, recommendations, comments, etc.?
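As a reference for option 1 above, here is a rough sketch (not Netmiko code) of ordering the detection entries by how often their command appears in SSH_MAPPER_BASE, so that 'show version' is tried before 'show system'. It assumes SSH_MAPPER_BASE keeps its current structure of device_type -> {"cmd": ..., "search_patterns": ..., ...}.

```python
# Sketch of option 1: iterate over detection entries most-common-command first.
from collections import Counter, OrderedDict


def order_by_cmd_frequency(mapper):
    # Count how many device types share each detection command.
    counts = Counter(info["cmd"] for info in mapper.values())
    # Most frequent commands first, so 'show version' is sent before 'show system'.
    return OrderedDict(
        sorted(
            mapper.items(),
            key=lambda item: counts[item[1]["cmd"]],
            reverse=True,
        )
    )
```

With this ordering, most devices are identified on the first command sent, which also avoids sending 'show system' to devices that page its output.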
[ { "content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same parameters than a standard Netmiko\nconnection (see the *netmiko.ssh_dispatacher.ConnectHandler* function). The only acceptable value\nfor the 'device_type' argument is 'autodetect'.\n\nThe auto-detection is solely based on *SSH_MAPPER_BASE*. The keys are the name of\nthe 'device_type' supported for auto-detection and the value is another dictionary describing how\nto handle the auto-detection.\n\n* \"cmd\" : The command to send to the remote device. **The command output must not require paging.**\n* \"search_patterns\" : A list of regex to compare with the output of the command\n* \"priority\" : An integer (0-99) which specifies the confidence of the match above\n* \"dispatch\" : The function to call to try the autodetection (per default SSHDetect._autodetect_std)\n\nExamples\n--------\n\n# Auto-detection section\n>>> from netmiko.ssh_autodetect import SSHDetect\n>>> from netmiko.ssh_dispatcher import ConnectHandler\n>>> remote_device = {'device_type': 'autodetect',\n 'host': 'remote.host',\n 'username': 'test',\n 'password': 'foo'}\n>>> guesser = SSHDetect(**remote_device)\n>>> best_match = guesser.autodetect()\n>>> print(best_match) # Name of the best device_type to use further\n>>> print(guesser.potential_matches) # Dictionary of the whole matching result\n\n# Netmiko connection creation section\n>>> remote_device['device_type'] = best_match\n>>> connection = ConnectHandler(**remote_device)\n\"\"\"\nimport re\nimport time\nfrom netmiko.ssh_dispatcher import ConnectHandler\nfrom netmiko.base_connection import BaseConnection\n\n\n# 'dispatch' key is the SSHDetect method to call. 
dispatch key will be popped off dictionary\n# remaining keys indicate kwargs that will be passed to dispatch method.\n# Note, the 'cmd' needs to avoid output paging.\nSSH_MAPPER_BASE = {\n \"alcatel_aos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"Alcatel-Lucent\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"alcatel_sros\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\"Nokia\", \"Alcatel\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"apresia_aeos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\"Apresia\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arista_eos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Arista\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_asa\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Adaptive Security Appliance\", r\"Cisco ASA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_ios\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n \"Cisco IOS Software\",\n \"Cisco Internetwork Operating System Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_nxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Nexus Operating System\", r\"NX-OS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xr\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XR\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_force10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"S4048-ON\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os9\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\n r\"Dell Application Software Version: 9\",\n r\"Dell Networking OS Version : 9\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Dell EMC Networking OS10-Enterprise\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_powerconnect\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"PowerConnect\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_tmsh\": {\n \"cmd\": \"show sys version\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_linux\": {\n \"cmd\": \"cat /etc/issue\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"hp_comware\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\"HPE Comware\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"huawei\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\n r\"Huawei Technologies\",\n r\"Huawei Versatile Routing Platform Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"juniper_junos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n r\"JUNOS Software Release\",\n r\"JUNOS .+ Software\",\n r\"JUNOS OS Kernel\",\n r\"JUNOS Base Version\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"linux\": {\n \"cmd\": \"uname -a\",\n \"search_patterns\": [r\"Linux\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"brocade_netiron\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"NetIron\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_slx\": {\n \"cmd\": \"show version\",\n 
\"search_patterns\": [r\"SLX-OS Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ubiquiti_edgeswitch\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"EdgeSwitch\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_wlc\": {\n \"cmd\": \"\",\n \"dispatch\": \"_autodetect_remote_version\",\n \"search_patterns\": [r\"CISCO_WLC\"],\n \"priority\": 99,\n },\n \"mellanox_mlnxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Onyx\", r\"SX_PPC_M460EX\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"yamaha\": {\n \"cmd\": \"show copyright\",\n \"search_patterns\": [r\"Yamaha Corporation\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"fortinet\": {\n \"cmd\": \"get system status\",\n \"search_patterns\": [r\"FortiOS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n}\n\n# Sort SSH_MAPPER_BASE such that the most common commands are first\ncmd_count = {}\nfor k, v in SSH_MAPPER_BASE.items():\n count = cmd_count.setdefault(v[\"cmd\"], 0)\n cmd_count[v[\"cmd\"]] = count + 1\ncmd_count = {k: v for k, v in sorted(cmd_count.items(), key=lambda item: item[1])}\n\n# SSH_MAPPER_BASE will be a list after this\nSSH_MAPPER_BASE = sorted(\n SSH_MAPPER_BASE.items(), key=lambda item: int(cmd_count[item[1][\"cmd\"]])\n)\nSSH_MAPPER_BASE.reverse()\n\n\nclass SSHDetect(object):\n \"\"\"\n The SSHDetect class tries to automatically guess the device type running on the SSH remote end.\n Be careful that the kwargs 'device_type' must be set to 'autodetect', otherwise it won't work at\n all.\n\n Parameters\n ----------\n *args : list\n The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n *kwargs : dict\n The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n\n Attributes\n ----------\n connection : netmiko.terminal_server.TerminalServerSSH\n A basic connection to the remote SSH end.\n potential_matches: dict\n Dict of (device_type, accuracy) that is populated through an interaction with the\n remote end.\n\n Methods\n -------\n autodetect()\n Try to determine the device type.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of the SSHDetect class\n \"\"\"\n if kwargs[\"device_type\"] != \"autodetect\":\n raise ValueError(\"The connection device_type must be 'autodetect'\")\n # Always set cmd_verify to False for autodetect\n kwargs[\"global_cmd_verify\"] = False\n self.connection = ConnectHandler(*args, **kwargs)\n # Call the _test_channel_read() in base to clear initial data\n output = BaseConnection._test_channel_read(self.connection)\n self.initial_buffer = output\n self.potential_matches = {}\n self._results_cache = {}\n\n def autodetect(self):\n \"\"\"\n Try to guess the best 'device_type' based on patterns defined in SSH_MAPPER_BASE\n\n Returns\n -------\n best_match : str or None\n The device type that is currently the best to use to interact with the device\n \"\"\"\n for device_type, autodetect_dict in SSH_MAPPER_BASE:\n tmp_dict = autodetect_dict.copy()\n call_method = tmp_dict.pop(\"dispatch\")\n autodetect_method = getattr(self, call_method)\n accuracy = autodetect_method(**tmp_dict)\n if accuracy:\n self.potential_matches[device_type] = accuracy\n if accuracy >= 99: # Stop the loop as we are sure of our match\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return best_match[0][0]\n\n if not 
self.potential_matches:\n self.connection.disconnect()\n return None\n\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return best_match[0][0]\n\n def _send_command(self, cmd=\"\"):\n \"\"\"\n Handle reading/writing channel directly. It is also sanitizing the output received.\n\n Parameters\n ----------\n cmd : str, optional\n The command to send to the remote device (default : \"\", just send a new line)\n\n Returns\n -------\n output : str\n The output from the command sent\n \"\"\"\n self.connection.write_channel(cmd + \"\\n\")\n time.sleep(1)\n output = self.connection._read_channel_timing()\n output = self.connection.strip_backspaces(output)\n return output\n\n def _send_command_wrapper(self, cmd):\n \"\"\"\n Send command to the remote device with a caching feature to avoid sending the same command\n twice based on the SSH_MAPPER_BASE dict cmd key.\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n\n Returns\n -------\n response : str\n The response from the remote device.\n \"\"\"\n cached_results = self._results_cache.get(cmd)\n if not cached_results:\n response = self._send_command(cmd)\n self._results_cache[cmd] = response\n return response\n else:\n return cached_results\n\n def _autodetect_remote_version(\n self, search_patterns=None, re_flags=re.IGNORECASE, priority=99\n ):\n \"\"\"\n Method to try auto-detect the device type, by matching a regular expression on the reported\n remote version of the SSH server.\n\n Parameters\n ----------\n search_patterns : list\n A list of regular expression to look for in the reported remote SSH version\n (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [r\"^$\"]\n\n if not search_patterns:\n return 0\n\n try:\n remote_version = self.connection.remote_conn.transport.remote_version\n for pattern in invalid_responses:\n match = re.search(pattern, remote_version, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, remote_version, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n\n def _autodetect_std(\n self, cmd=\"\", search_patterns=None, re_flags=re.IGNORECASE, priority=99\n ):\n \"\"\"\n Standard method to try to auto-detect the device type. This method will be called for each\n device_type present in SSH_MAPPER_BASE dict ('dispatch' key). 
It will attempt to send a\n command and match some regular expression from the ouput for each entry in SSH_MAPPER_BASE\n ('cmd' and 'search_pattern' keys).\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n search_patterns : list\n A list of regular expression to look for in the command's output (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [\n r\"% Invalid input detected\",\n r\"syntax error, expecting\",\n r\"Error: Unrecognized command\",\n r\"%Error\",\n r\"command not found\",\n r\"Syntax Error: unexpected argument\",\n ]\n if not cmd or not search_patterns:\n return 0\n try:\n # _send_command_wrapper will use already cached results if available\n response = self._send_command_wrapper(cmd)\n # Look for error conditions in output\n for pattern in invalid_responses:\n match = re.search(pattern, response, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, response, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n", "path": "netmiko/ssh_autodetect.py" } ]
[ { "content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same parameters than a standard Netmiko\nconnection (see the *netmiko.ssh_dispatacher.ConnectHandler* function). The only acceptable value\nfor the 'device_type' argument is 'autodetect'.\n\nThe auto-detection is solely based on *SSH_MAPPER_BASE*. The keys are the name of\nthe 'device_type' supported for auto-detection and the value is another dictionary describing how\nto handle the auto-detection.\n\n* \"cmd\" : The command to send to the remote device. **The command output must not require paging.**\n* \"search_patterns\" : A list of regex to compare with the output of the command\n* \"priority\" : An integer (0-99) which specifies the confidence of the match above\n* \"dispatch\" : The function to call to try the autodetection (per default SSHDetect._autodetect_std)\n\nExamples\n--------\n\n# Auto-detection section\n>>> from netmiko.ssh_autodetect import SSHDetect\n>>> from netmiko.ssh_dispatcher import ConnectHandler\n>>> remote_device = {'device_type': 'autodetect',\n 'host': 'remote.host',\n 'username': 'test',\n 'password': 'foo'}\n>>> guesser = SSHDetect(**remote_device)\n>>> best_match = guesser.autodetect()\n>>> print(best_match) # Name of the best device_type to use further\n>>> print(guesser.potential_matches) # Dictionary of the whole matching result\n\n# Netmiko connection creation section\n>>> remote_device['device_type'] = best_match\n>>> connection = ConnectHandler(**remote_device)\n\"\"\"\nimport re\nimport time\nfrom netmiko.ssh_dispatcher import ConnectHandler\nfrom netmiko.base_connection import BaseConnection\n\n\n# 'dispatch' key is the SSHDetect method to call. 
dispatch key will be popped off dictionary\n# remaining keys indicate kwargs that will be passed to dispatch method.\n# Note, the 'cmd' needs to avoid output paging.\nSSH_MAPPER_BASE = {\n \"alcatel_aos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"Alcatel-Lucent\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"alcatel_sros\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\"Nokia\", \"Alcatel\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"apresia_aeos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\"Apresia\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arista_eos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Arista\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_asa\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Adaptive Security Appliance\", r\"Cisco ASA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_ios\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n \"Cisco IOS Software\",\n \"Cisco Internetwork Operating System Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_nxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Nexus Operating System\", r\"NX-OS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xr\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XR\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_force10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Real Time Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os9\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\n r\"Dell Application Software Version: 9\",\n r\"Dell Networking OS Version : 9\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Dell EMC Networking OS10-Enterprise\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_powerconnect\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"PowerConnect\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_tmsh\": {\n \"cmd\": \"show sys version\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_linux\": {\n \"cmd\": \"cat /etc/issue\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"hp_comware\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\"HPE Comware\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"huawei\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\n r\"Huawei Technologies\",\n r\"Huawei Versatile Routing Platform Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"juniper_junos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n r\"JUNOS Software Release\",\n r\"JUNOS .+ Software\",\n r\"JUNOS OS Kernel\",\n r\"JUNOS Base Version\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"linux\": {\n \"cmd\": \"uname -a\",\n \"search_patterns\": [r\"Linux\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"brocade_netiron\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"NetIron\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_slx\": {\n \"cmd\": \"show 
version\",\n \"search_patterns\": [r\"SLX-OS Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ubiquiti_edgeswitch\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"EdgeSwitch\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_wlc\": {\n \"cmd\": \"\",\n \"dispatch\": \"_autodetect_remote_version\",\n \"search_patterns\": [r\"CISCO_WLC\"],\n \"priority\": 99,\n },\n \"mellanox_mlnxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Onyx\", r\"SX_PPC_M460EX\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"yamaha\": {\n \"cmd\": \"show copyright\",\n \"search_patterns\": [r\"Yamaha Corporation\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"fortinet\": {\n \"cmd\": \"get system status\",\n \"search_patterns\": [r\"FortiOS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n}\n\n# Sort SSH_MAPPER_BASE such that the most common commands are first\ncmd_count = {}\nfor k, v in SSH_MAPPER_BASE.items():\n count = cmd_count.setdefault(v[\"cmd\"], 0)\n cmd_count[v[\"cmd\"]] = count + 1\ncmd_count = {k: v for k, v in sorted(cmd_count.items(), key=lambda item: item[1])}\n\n# SSH_MAPPER_BASE will be a list after this\nSSH_MAPPER_BASE = sorted(\n SSH_MAPPER_BASE.items(), key=lambda item: int(cmd_count[item[1][\"cmd\"]])\n)\nSSH_MAPPER_BASE.reverse()\n\n\nclass SSHDetect(object):\n \"\"\"\n The SSHDetect class tries to automatically guess the device type running on the SSH remote end.\n Be careful that the kwargs 'device_type' must be set to 'autodetect', otherwise it won't work at\n all.\n\n Parameters\n ----------\n *args : list\n The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n *kwargs : dict\n The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n\n Attributes\n ----------\n connection : netmiko.terminal_server.TerminalServerSSH\n A basic connection to the remote SSH end.\n potential_matches: dict\n Dict of (device_type, accuracy) that is populated through an interaction with the\n remote end.\n\n Methods\n -------\n autodetect()\n Try to determine the device type.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of the SSHDetect class\n \"\"\"\n if kwargs[\"device_type\"] != \"autodetect\":\n raise ValueError(\"The connection device_type must be 'autodetect'\")\n # Always set cmd_verify to False for autodetect\n kwargs[\"global_cmd_verify\"] = False\n self.connection = ConnectHandler(*args, **kwargs)\n # Call the _test_channel_read() in base to clear initial data\n output = BaseConnection._test_channel_read(self.connection)\n self.initial_buffer = output\n self.potential_matches = {}\n self._results_cache = {}\n\n def autodetect(self):\n \"\"\"\n Try to guess the best 'device_type' based on patterns defined in SSH_MAPPER_BASE\n\n Returns\n -------\n best_match : str or None\n The device type that is currently the best to use to interact with the device\n \"\"\"\n for device_type, autodetect_dict in SSH_MAPPER_BASE:\n tmp_dict = autodetect_dict.copy()\n call_method = tmp_dict.pop(\"dispatch\")\n autodetect_method = getattr(self, call_method)\n accuracy = autodetect_method(**tmp_dict)\n if accuracy:\n self.potential_matches[device_type] = accuracy\n if accuracy >= 99: # Stop the loop as we are sure of our match\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return 
best_match[0][0]\n\n if not self.potential_matches:\n self.connection.disconnect()\n return None\n\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return best_match[0][0]\n\n def _send_command(self, cmd=\"\"):\n \"\"\"\n Handle reading/writing channel directly. It is also sanitizing the output received.\n\n Parameters\n ----------\n cmd : str, optional\n The command to send to the remote device (default : \"\", just send a new line)\n\n Returns\n -------\n output : str\n The output from the command sent\n \"\"\"\n self.connection.write_channel(cmd + \"\\n\")\n time.sleep(1)\n output = self.connection._read_channel_timing()\n output = self.connection.strip_backspaces(output)\n return output\n\n def _send_command_wrapper(self, cmd):\n \"\"\"\n Send command to the remote device with a caching feature to avoid sending the same command\n twice based on the SSH_MAPPER_BASE dict cmd key.\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n\n Returns\n -------\n response : str\n The response from the remote device.\n \"\"\"\n cached_results = self._results_cache.get(cmd)\n if not cached_results:\n response = self._send_command(cmd)\n self._results_cache[cmd] = response\n return response\n else:\n return cached_results\n\n def _autodetect_remote_version(\n self, search_patterns=None, re_flags=re.IGNORECASE, priority=99\n ):\n \"\"\"\n Method to try auto-detect the device type, by matching a regular expression on the reported\n remote version of the SSH server.\n\n Parameters\n ----------\n search_patterns : list\n A list of regular expression to look for in the reported remote SSH version\n (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [r\"^$\"]\n\n if not search_patterns:\n return 0\n\n try:\n remote_version = self.connection.remote_conn.transport.remote_version\n for pattern in invalid_responses:\n match = re.search(pattern, remote_version, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, remote_version, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n\n def _autodetect_std(\n self, cmd=\"\", search_patterns=None, re_flags=re.IGNORECASE, priority=99\n ):\n \"\"\"\n Standard method to try to auto-detect the device type. This method will be called for each\n device_type present in SSH_MAPPER_BASE dict ('dispatch' key). 
It will attempt to send a\n command and match some regular expression from the ouput for each entry in SSH_MAPPER_BASE\n ('cmd' and 'search_pattern' keys).\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n search_patterns : list\n A list of regular expression to look for in the command's output (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [\n r\"% Invalid input detected\",\n r\"syntax error, expecting\",\n r\"Error: Unrecognized command\",\n r\"%Error\",\n r\"command not found\",\n r\"Syntax Error: unexpected argument\",\n ]\n if not cmd or not search_patterns:\n return 0\n try:\n # _send_command_wrapper will use already cached results if available\n response = self._send_command_wrapper(cmd)\n # Look for error conditions in output\n for pattern in invalid_responses:\n match = re.search(pattern, response, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, response, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n", "path": "netmiko/ssh_autodetect.py" } ]
diff --git a/netmiko/ssh_autodetect.py b/netmiko/ssh_autodetect.py index 9b1f96d26..971570244 100644 --- a/netmiko/ssh_autodetect.py +++ b/netmiko/ssh_autodetect.py @@ -101,7 +101,7 @@ }, "dell_force10": { "cmd": "show version", - "search_patterns": [r"S4048-ON"], + "search_patterns": [r"Real Time Operating System Software"], "priority": 99, "dispatch": "_autodetect_std", },
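The diff above swaps the hardware-specific `S4048-ON` pattern for one keyed on the OS banner. A tiny illustration of how such a pattern is evaluated (the sample banner line is an assumption, not captured device output; `_autodetect_std` in the module above applies the same `re.search` with `re.IGNORECASE`):

```python
import re

# Hypothetical line from 'show version' on a Dell Force10 / OS9 switch.
sample_output = "Dell Real Time Operating System Software"

pattern = r"Real Time Operating System Software"  # new dell_force10 pattern
print(bool(re.search(pattern, sample_output, flags=re.IGNORECASE)))  # True
```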
chainer__chainer-3204
VariableNode.shape is None although Parameter.initialize is called.
In the following code, `a.node.shape` is `None` with the latest version of Chainer (3.0.0b1, 034c4c596).

```py
a = chainer.Parameter()
a.initialize((1, 1))
print(a.node.shape)  # returns None
```

Running the following code then triggers an internal error.

```py
import chainer
from chainer import computational_graph as c

def main():
    a = chainer.Parameter()
    b = chainer.Parameter()
    a.initialize((1, 1))
    b.initialize((1, 1))
    vs = a + b
    g = c.build_computational_graph(vs)
    g.dump()

if __name__ == '__main__':
    main()
```

```
Traceback (most recent call last):
  File "poc.py", line 16, in <module>
    main()
  File "poc.py", line 12, in main
    g.dump()
  File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 164, in dump
    return self._to_dot()
  File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 120, in _to_dot
    node, self.variable_style, self.show_name).label
  File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/computational_graph.py", line 29, in __init__
    self.attribute = {'label': node.label}
  File "/home/igarashi/.pyenv/versions/3.6.1/lib/python3.6/site-packages/chainer-3.0.0b1-py3.6.egg/chainer/variable.py", line 280, in label
    return '(%s), %s' % (', '.join(map(str, self.shape)),
TypeError: 'NoneType' object is not iterable
```
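A hypothetical regression check for the behaviour reported above (assuming `chainer` is importable and the bug has been fixed); after `initialize`, the node should mirror the data array:

```python
import chainer


def test_parameter_initialize_updates_node():
    a = chainer.Parameter()
    a.initialize((1, 1))
    # Expected behaviour once fixed: the node tracks the newly created array.
    assert a.node.shape == (1, 1)
    assert a.node.dtype == a.data.dtype
```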
[ { "content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(func, x, gx):\n if x.data is None or gx is None:\n # ``x.data is None`` implies that the data array is not retained\n return\n if not isinstance(gx, type(x.data)):\n msg = ('Type of data and grad mismatch\\n%s != %s' %\n (type(x.data), type(gx)))\n typ = TypeError\n elif gx.dtype != x.data.dtype:\n msg = ('Dtype of data and grad mismatch\\n%s != %s' %\n (x.data.dtype, gx.dtype))\n typ = TypeError\n elif gx.shape != x.data.shape:\n msg = ('Shape of data and grad mismatch\\n%s != %s' %\n (x.data.shape, gx.shape))\n typ = ValueError\n else:\n return\n\n detail = ''\n if func:\n detail = 'Function `{0}` ({1}) has a bug.\\n'.format(\n type(func)._impl_name, func.label)\n stack = func.stack\n if stack:\n detail += 'Stacktrace of the function is below:\\n'\n for line in traceback.format_list(func.stack):\n detail += line\n detail += '''\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''.format(type(func).__name__, func.label)\n\n raise typ(detail + msg)\n\n\ndef variable_repr(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_repr\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n elif arr.size > 0 or arr.shape == (0,):\n lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')\n else: # show zero-length shape unless it is (0,)\n lst = '[], shape=%s' % (repr(arr.shape),)\n\n return '%s(%s)' % (prefix, lst)\n\n\ndef variable_str(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_str\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n else:\n lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')\n\n return '%s(%s)' % (prefix, lst)\n\n\nclass VariableNode(object):\n\n \"\"\"Node in the backward computational graph representing a variable.\n\n This object represents a variable node in a computational graph. The node\n is used in error backpropagation (a.k.a. backprop) to determine which\n gradient to be passed to each function.\n\n A variable node is held by the corresponding :class:`Variable` object,\n which is managed by users. :class:`Function` objects that take the variable\n as an input also hold references to the variable node.\n\n Note that the node does not hold a reference to the corresponding data\n array in general. The data array is actually accessible by the node in the\n following cases.\n\n 1. If there exists a :class:`Variable` object that holds a reference to the\n variable node, the variable node holds a weak reference to the variable\n object, and thus the data array is accessible via the weak reference.\n 2. If :meth:`retain_data` is called, the node holds a reference to the data\n array. 
It is mainly called by a function that needs the input or output\n data array in its backprop procedure. See :meth:`Function.retain_inputs`\n and :meth:`Function.retain_outputs` for more details.\n\n Users usually do not need to touch this variable node object. The\n computational graph is automatically managed by Chainer, and any interface\n that is beneficial for users is also provided by :class:`Variable`.\n\n Args:\n variable (Variable): The corresponding variable object.\n name (str): Name of the variable node.\n\n Attributes:\n ~VariableNode.dtype: Data type of the data array.\n ~VariableNode.shape: Shape of the data array.\n ~VariableNode.name (str): Name of the variable node.\n\n \"\"\"\n\n _creator_node = None\n _data = None\n _rank = 0\n # Name of the Function is assigned if this variable is a gradient generated\n # by an old-style Function\n _old_style_grad_generator = None\n\n def __init__(self, variable, name, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs,\n grad='unexpected keyword argument \"grad\": '\n 'pass the gradient to Variable instead'\n )\n self._variable = weakref.ref(variable)\n self.name = name\n self._requires_grad = variable.requires_grad\n\n vdata = variable.data\n self._set_data_type(vdata)\n\n @property\n def creator(self):\n \"\"\"Function object that created this variable node.\n\n When the function is implemented with the old-style API (i.e., it uses\n :class:`Function` class), this property returns the :class:`Function`\n object. The object is extracted from the :class:`FunctionAdapter`\n object, so the returned object is not the function node, but instead\n the actual implementation of forward and backward procedures.\n\n When the function is implemented with the new-style API (i.e., it uses\n :class:`FunctionNode` class), this property returns the function node\n object. In this case, the returned object is same as\n :attr:`creator_node`.\n\n .. warning::\n\n As of v3.0.0, when the creator is an old-style function, the\n following code is invalid:\n\n .. code-block:: python\n\n creator = v.creator\n v.creator = None\n ...\n v.creator = creator\n\n The point is that :class:`FunctionNode` objects are used as nodes\n in the computational graph instead of :class:`Function`, and each\n :class:`Function` object only holds a *weak reference* to the\n corresponding :class:`FunctionNode`. Since ``creator`` returns the\n :class:`Function` object, the :class:`FunctionNode` object is not\n kept by preserving ``creator``.\n\n The above code should be fixed as follows.\n\n .. 
code-block:: python\n\n creator_node = v.creator_node\n v.creator_node = None\n ...\n v.creator_node = creator_node\n\n \"\"\"\n node = self._creator_node\n if node is None:\n return None\n\n if isinstance(node, chainer.function.FunctionAdapter):\n return node.function\n return node\n\n @creator.setter\n def creator(self, func):\n self.creator_node = func\n\n @property\n def creator_node(self):\n \"\"\"Function node that has this variable as an output.\n\n See :class:`FunctionNode` for the definition of a function node.\n\n \"\"\"\n return self._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n if isinstance(func, chainer.Function):\n func = func.node\n self._creator_node = func\n if func is not None:\n self._rank = func.rank + 1\n\n @property\n def data(self):\n \"\"\"Data array of the corresponding variable.\n\n If the data is not available, it returns ``None``.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, d):\n self._data = d\n self._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of the corresponding variable.\n\n If the variable is not available, it returns ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var.grad\n\n @property\n def grad_var(self):\n \"\"\"Gradient variable of the corresponding variable.\n\n If the corresponding variable is not available, it return ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var._grad_var\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable node.\"\"\"\n if self.shape == ():\n return str(self.dtype)\n return '(%s), %s' % (', '.join(map(str, self.shape)),\n str(self.dtype))\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def get_variable(self):\n \"\"\"Returns the corresponding :class:`Variable` object.\n\n VariableNode object holds a weak reference of the variable object. If\n the reference is alive, it is returned by this property. Otherwise,\n this property creates a new :class:`Variable` object from this node\n object and returns it.\n\n Returns:\n Variable: The variable object that refers this node.\n\n \"\"\"\n var = self._variable()\n if var is not None:\n return var\n\n var = Variable(self.data, name=self.name,\n requires_grad=self._requires_grad)\n var._node = self\n return var\n\n def set_creator(self, creator):\n \"\"\"Sets a :class:`Function` object that created this node.\n\n This method is equivalent to ``self.creator = creator``. A\n :class:`FunctionNode` object can also be passed.\n\n Args:\n creator (Function or FunctionNode): Function that has created this\n variable.\n\n \"\"\"\n self.creator = creator\n\n def set_creator_node(self, creator_node):\n \"\"\"Sets a :class:`FunctionNode` object that created this node.\n\n This method is equivalent to ``self.creator_node = creator_node``. 
A\n :class:`Function` object can also be passed, in which case the\n :attr:`~Function.node` object is extracted.\n\n Args:\n creator_node (FunctionNode or Function): Function node that has\n this variable as an output.\n\n \"\"\"\n self.creator_node = creator_node\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable node.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def retain_data(self):\n \"\"\"Lets the node hold a reference to the underlying data array.\n\n This method gets the data array of the corresponding variable and keeps\n it. If the weak reference to the corresponding variable is dead, it\n raises an error.\n\n \"\"\"\n variable = self._variable()\n if variable is not None:\n self.data = variable.data\n else:\n raise RuntimeError('cannot retain variable data: the variable has '\n 'been already released')\n\n def _set_data_type(self, d):\n if d is None:\n self.dtype = None\n self.shape = None\n else:\n self.dtype = d.dtype\n self.shape = d.shape\n\n def _check_old_style_gradient(self):\n if self._old_style_grad_generator is not None:\n raise RuntimeError(\n 'cannot twice-differentiate an old style Function \"%s\"' %\n self._old_style_grad_generator)\n\n\ndef _create_variable(data, name, grad, requires_grad):\n return Variable(\n data, name=name, grad=grad, requires_grad=requires_grad)\n\n\nclass Variable(object):\n\n \"\"\"__init__(data=None, *, name=None, grad=None, requires_grad=True)\n\n Array with a structure to keep track of computation.\n\n Every variable holds a data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`.\n\n A variable object holds a data array and a :class:`VariableNode` object of\n a computational graph. If the variable is constructed by the user, the node\n is *root* and does not hold any parent. If the variable is constructed by a\n :class:`FunctionNode` object, the node holds a reference to its parent\n called :attr:`creator_node`. This reference is used in backpropagation to\n backtrack the graph.\n\n Users can disable (resp. enable) this chaining behavior by calling\n :func:`~chainer.no_backprop_mode` (resp.\n :func:`~chainer.force_backprop_mode`).\n In the former context, a variable never creates a computational graph,\n whereas in the latter context, it is forced to create.\n\n .. warning::\n\n ``volatile`` argument is not supported anymore since v2.\n Instead, use :func:`chainer.no_backprop_mode`.\n\n Args:\n data (numpy.ndarray or cupy.ndarray): Initial data array.\n name (str): Name of the variable.\n grad (numpy.ndarray or cupy.ndarray): Initial gradient array.\n requires_grad (bool): Boolean indicating whether ``grad`` will be set\n in backward calculation.\n\n Attributes:\n ~Variable.data: Data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`. If it is None, the variable is left in an\n uninitialized state.\n ~Variable.grad_var (Variable): Gradient variable.\n\n \"\"\" # NOQA\n\n def __init__(self, data=None, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs, volatile='volatile argument is not supported anymore. 
'\n 'Use chainer.using_config')\n name, grad, requires_grad \\\n = argument.parse_kwargs(\n kwargs, ('name', None), ('grad', None),\n ('requires_grad', True))\n\n if (data is not None and\n not isinstance(data, (numpy.ndarray, cuda.ndarray))):\n msg = '''numpy.ndarray or cuda.ndarray are expected.\nActual: {0}'''.format(type(data))\n raise TypeError(msg)\n\n # Use a list as a data structure to hold the data array indirectly to\n # abstract its initialized/uninitialized state.\n self._data = [data]\n self._requires_grad = requires_grad\n self._node = VariableNode(self, name)\n self._grad_var = None if grad is None else Variable(grad)\n\n def __copy__(self):\n return self._copy_to(Variable())\n\n def _copy_to(self, target):\n target.__dict__ = copy.copy(self.__dict__)\n target._node = VariableNode(target, self.name)\n return target\n\n def __reduce__(self):\n return _create_variable, (self.data, self.name, self.grad,\n self._requires_grad)\n\n def __repr__(self):\n return variable_repr(self)\n\n def __str__(self):\n return variable_str(self)\n\n @property\n def name(self):\n return self._node.name\n\n @name.setter\n def name(self, n):\n self._node.name = n\n\n def summary(self):\n if self.name:\n return '<variable %s>' % self.name\n else:\n return '<variable at 0x%x>' % id(self)\n\n def debug_print(self):\n \"\"\"Display a summary of the stored data and location of the Variable\"\"\"\n\n msg = \"\"\"{summary}\n- device: {device}\n- backend: {background}\n- shape: {shape}\n- dtype: {dtype}\n- statistics: {stats}\n- grad: {grad}\"\"\"\n\n stats_msg = 'mean={0:.8f}, std={1:.8f}'\n\n try:\n device = self.data.device\n except AttributeError:\n device = 'CPU'\n\n with cuda.get_device_from_array(self.data) as dev:\n xp = numpy if int(dev) == -1 else cuda.cupy\n\n if self.grad is None:\n grad = None\n elif xp.all(self.grad == 0):\n grad = 0\n else:\n grad = stats_msg.format(float(xp.mean(self.grad)),\n float(xp.std(self.grad)))\n\n stats = stats_msg.format(float(xp.mean(self.data)),\n float(xp.std(self.data)))\n\n return msg.format(summary=self.summary(),\n grad=grad, shape=self.data.shape,\n background=type(self.data),\n dtype=self.data.dtype, device=device,\n stats=stats)\n\n def __pos__(self):\n return self\n\n def __len__(self):\n \"\"\"Returns the first dimension of the data array.\n\n Returns:\n int: Number of the first dimension of the data array.\n\n \"\"\"\n return len(self.data)\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable.\"\"\"\n return self._node.label\n\n @property\n def creator(self):\n \"\"\"Function implementation that created this variable.\n\n When this variable has been created by an old-style function (i.e., it\n is implemented as a subclass of :class:`Function`), this property\n returns that :class:`Function` object.\n\n When this variable has been created by a new-style function (i.e., it\n is implemented as a subclass of :class:`FunctionNode` class), this\n property returns that node object.\n\n \"\"\"\n return self._node.creator\n\n @creator.setter\n def creator(self, func):\n self._node.creator = func\n\n @property\n def creator_node(self):\n \"\"\":class:`FunctionNode` object that created this variable.\n\n This property has a setter to which ``None`` can be set. Setting\n ``None`` to this property is equivalent to call :meth:`unchain`;\n it purges the variable from the function that created this variable.\n\n The setter also accepts the original :class:`FunctionNode` object that\n created this variable. 
For example, you can once set ``None`` to this\n property and then set the original value again.\n\n .. note::\n Setting an irrelevant :meth:`FunctionNode` object does not emit any\n error immediately, whereas the behavior is undefined. Do not set\n a :meth:`FunctionNode` object that did not create this variable\n object.\n\n \"\"\"\n return self._node._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n self._node.creator_node = func\n\n @property\n def data(self):\n return self._data[0]\n\n @data.setter\n def data(self, d):\n self._data[0] = d\n self._node._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of this variable.\n\n Not that this property returns the underlying array of the gradient\n variable instead of the gradient variable itself; to get/set\n gradient variable, use :attr:`grad_var` instead.\n\n \"\"\"\n gv = self._grad_var\n return None if gv is None else gv.data\n\n @grad.setter\n def grad(self, g):\n self.grad_var = None if g is None else Variable(g)\n\n @property\n def grad_var(self):\n return self._grad_var\n\n @grad_var.setter\n def grad_var(self, g):\n if g is not None:\n _check_grad_type(None, self, g.data)\n self._grad_var = g\n\n @property\n def shape(self):\n return self.data.shape\n\n @property\n def ndim(self):\n return self.data.ndim\n\n @property\n def size(self):\n return self.data.size\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @property\n def rank(self):\n return self._node.rank\n\n @property\n def node(self):\n return self._node\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def to_cpu(self):\n \"\"\"Copies the data and gradient arrays to CPU.\"\"\"\n if self.data is None:\n return\n\n self._data = [cuda.to_cpu(self.data)]\n if self._grad_var is not None:\n self._grad_var.to_cpu()\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def to_gpu(self, device=None):\n \"\"\"Copies the data and gradient arrays to specified GPU.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n \"\"\"\n if self.data is None:\n self._initial_device = (cuda.Device().id\n if device is None else device)\n else:\n self._data = [cuda.to_gpu(self.data, device)]\n if self._grad_var is not None:\n self._grad_var.to_gpu(device)\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def cleargrad(self):\n \"\"\"Clears the gradient array.\"\"\"\n self._grad_var = None\n\n def zerograd(self):\n \"\"\"Initializes the gradient array by zeros.\n\n Note that the gradient variable is unchained from the computational\n graph by this method because this operation breaks the backprop\n validity.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrad` instead.\n\n \"\"\"\n warnings.warn(\n 'Variable.zerograd is deprecated. 
Use Variable.cleargrad instead.',\n DeprecationWarning)\n\n if self.data is None:\n return\n\n with cuda.get_device_from_array(self.data) as dev:\n gv = self._grad_var\n if gv is None:\n xp = numpy if dev.id == -1 else cuda.cupy\n self.grad = xp.zeros_like(self.data)\n else:\n gv.unchain()\n gv.data.fill(0)\n\n def copydata(self, var):\n \"\"\"Copies the data array from given source variable.\n\n This method copies the data array from given variable to this variable.\n The copy is done even if the arrays reside on different devices,\n including across the host and a GPU device. If this variable has an\n uninitialized data array, this method initializes it by the data array\n of the given variable. Similarly, if the given variable has an\n uninitialized data array, this method initializes it by the data array\n of this variable (``self``). If both are uninitialized, this method\n does nothing.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var.data\n dst = self.data\n if src is None:\n if dst is None:\n return\n var.initialize(self.shape)\n src = var.data\n elif dst is None:\n self.initialize(src.shape)\n dst = self.data\n src_xp = cuda.get_array_module(src)\n dst_xp = cuda.get_array_module(dst)\n if dst_xp is src_xp:\n dst_xp.copyto(dst, src)\n elif dst_xp is numpy:\n dst_xp.copyto(dst, src.get())\n else:\n dst.set(src)\n\n def addgrad(self, var):\n \"\"\"Accumulates the gradient array from given source variable.\n\n This method adds the gradient of a given variable to the gradient of\n this variable. The accumulation is even done across the host and\n different devices. If this variable has uninitialized data/grad arrays,\n this method initializes it with the shape of the given variable and\n then accumulates the gradient.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var._grad_var\n if src is None:\n return\n\n if self.data is None:\n self.initialize(var.shape)\n dst = self._grad_var\n\n src_dev = cuda.get_device_from_array(src.data)\n dst_dev = cuda.get_device_from_array(self.data)\n\n if src_dev.id != dst_dev.id:\n src = chainer.functions.copy(src, dst_dev.id)\n self._grad_var = src if dst is None else src + dst\n\n def set_creator(self, gen_func):\n \"\"\"Notifies the variable that the given function is its creator.\n\n Args:\n gen_func (Function): Function object that creates this variable as\n one of its outputs.\n\n \"\"\"\n self._node.set_creator(gen_func)\n\n def set_creator_node(self, fnode):\n \"\"\"Notifies the variable that the given node is its creator.\n\n Args:\n fnode (FunctionNode): Function node that has this variable as an\n output.\n\n \"\"\"\n self._node.set_creator_node(fnode)\n\n def backward(self, retain_grad=False):\n \"\"\"Runs error backpropagation (a.k.a. backprop) from this variable.\n\n On backprop, :meth:`FunctionNode.backward` is called on each\n :class:`FunctionNode` object appearing in the backward graph starting\n from this variable. The backward graph is represented by backward\n references from variable nodes to their creators, and from function\n nodes to their input variable nodes. The backprop stops at all root\n nodes. Some function nodes set ``None`` as gradients of some inputs,\n where further backprop does not take place at such inputs.\n\n This method uses :data:`grad` as the initial error array. User can\n manually set a gradient array before calling this method. 
If\n :data:`data` contains only one element (i.e., it is scalar) and\n :data:`grad` is ``None``, then this method automatically complements\n 1.0 as the initial error. This is useful on starting backprop from\n some scalar loss value.\n\n Note that this method does not support *differentiable backprop*. Use\n :func:`grad` to compute the gradient of gradients.\n\n Args:\n retain_grad (bool): If ``True``, the gradient arrays of all\n intermediate variables are kept. Otherwise, :data:`grad` of the\n intermediate variables are set to ``None`` on appropriate\n timing, which may reduce the maximum memory consumption.\n\n In most cases of training some models, the purpose of backprop\n is to compute gradients of parameters, not of all variables,\n and therefore it is recommended to set this flag ``False``.\n\n \"\"\"\n self._node._check_old_style_gradient()\n if self.creator_node is None:\n return\n initial_device = None\n if cuda.available and isinstance(self.data, cuda.cupy.ndarray):\n try:\n initial_device = cuda.Device()\n except cuda.cupy.cuda.runtime.CUDARuntimeError as e:\n if e.status != 38: # cudaErrorNoDevice\n raise\n\n is_debug = chainer.is_debug()\n\n cand_funcs = []\n seen_set = set()\n grads = {}\n\n # Initialize error by 1, if this is a loss variable\n if self.data.size == 1 and self._grad_var is None:\n with cuda.get_device_from_array(self.data) as device:\n if device is cuda.DummyDevice:\n self.grad = numpy.ones_like(self.data)\n else:\n self.grad = cuda.cupy.ones_like(self.data)\n grads[self._node] = self._grad_var\n\n def add_cand(cand):\n if cand not in seen_set:\n # Negate since heapq is min-heap\n heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n def get_grad(node):\n if node is None:\n return None\n if node in grads:\n return grads[node]\n return node.grad_var\n\n while cand_funcs:\n _, _, func = heapq.heappop(cand_funcs)\n inputs = func.inputs\n outputs = [y() for y in func.outputs] # access via weak ref\n\n in_data = tuple([x.data for x in inputs])\n out_grad = tuple([get_grad(y) for y in outputs])\n out_grad_data = tuple(\n [None if g is None else g.data for g in out_grad])\n hooks = chainer.get_function_hooks()\n if func._n_local_function_hooks != 0:\n hooks = collections.OrderedDict(hooks)\n hooks.update(func.local_function_hooks)\n hooks = hooks.values() # avoid six for performance\n\n cuda.get_device_from_array(*in_data).use()\n for hook in hooks:\n hook.backward_preprocess(func, in_data, out_grad_data)\n\n # Collect the current input gradients.\n #\n # Note (Tokui): When the same variable is passed to multiple input\n # slots (e.g. an expression like ``f(x, x)``), it makes the\n # gradient accumulation complicated since the back-propagated\n # gradients w.r.t. the first and second argument should be\n # accumulated to the current gradient w.r.t. 
the same variable.\n # In this case, the current implementation passes the current\n # gradient only to the first occurrence of the variable in the\n # input tuple and passes ``None`` to the rest of the occurrences.\n # For example, when the input variables are ``(x, x)``, the\n # input gradient passed to the ``backward_accumulate`` method is\n # ``(gx, None)`` where ``gx`` is the current gradient of ``x``.\n # See also the docstring of ``FunctionNode.backward_accumulate``.\n target_input_indexes = [\n i for i, x in enumerate(inputs) if x.requires_grad\n ]\n target_inputs = [inputs[i] for i in target_input_indexes]\n in_grad = []\n for i, index_i in enumerate(target_input_indexes):\n x = inputs[index_i]\n if x in target_inputs[:i]:\n # Pass ``None`` for duplicated input variables except for\n # the first occurrence (see the comment above).\n gx = None\n elif x in grads:\n gx = grads[x]\n elif x.creator_node is None:\n x._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n gx = x.grad_var\n else:\n gx = None\n in_grad.append(gx)\n\n gxs = func.backward_accumulate(\n target_input_indexes, out_grad, in_grad)\n\n assert len(gxs) == len(in_grad)\n for hook in hooks:\n hook.backward_postprocess(func, in_data, out_grad_data)\n\n if is_debug:\n for gx in gxs:\n if gx is None:\n continue\n gx_data = gx.data\n cuda.get_device_from_array(gx_data).use()\n if cuda.get_array_module(gx_data).isnan(gx_data).any():\n msg = ('NaN is detected on backward computation of '\n '{}'.format(func.label))\n raise RuntimeError(msg)\n\n if not retain_grad:\n for y in outputs:\n if y is not None and y is not self.node:\n grads[y] = None\n y_var = y.get_variable()\n if y_var is not None:\n y_var._grad_var = None\n\n for i, gx in enumerate(gxs):\n if gx is None:\n continue\n\n x = target_inputs[i]\n if not x.requires_grad:\n continue\n\n _check_grad_type(func, x, gx.data)\n\n if x in target_inputs[:i]:\n # Accumulate the duplicated gradients here. See the comment\n # above the code that builds ``in_grad``.\n cur_gx = grads[x]\n grads[x] = gx if cur_gx is None else gx + cur_gx\n else:\n grads[x] = gx\n\n x_var = x.get_variable()\n if x_var is not None:\n x_var._grad_var = grads[x]\n\n if x.creator_node is not None:\n add_cand(x.creator_node)\n\n del gxs # to reduce memory usage\n if initial_device is not None:\n initial_device.use()\n\n def reshape(self, *shape):\n \"\"\"Returns a variable of a different shape and the same content.\n\n .. seealso::\n :func:`chainer.functions.reshape` for full documentation,\n\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (tuple, list)):\n shape = shape[0]\n return chainer.functions.reshape(self, shape)\n\n def transpose(self, *axes):\n \"\"\"Permute the dimensions of an input variable without copy.\n\n .. seealso::\n :func:`chainer.functions.transpose` for full documentation.\n\n \"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or\n axes[0] is None):\n axes = axes[0]\n return chainer.functions.transpose(self, axes)\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable.\n\n This method deletes the reference to the creator from the corresponding\n variable node. 
Unlike :meth:`unchain_backward`, it does not backtrack\n the graph.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def unchain_backward(self):\n \"\"\"Deletes references between variable nodes and functions backward.\n\n After this method completes, intermediate variable nodes and functions\n that are not referenced from anywhere are deallocated by reference\n count GC. Also this variable itself deletes the reference to its\n creator function from the node, i.e. the node becomes root in the\n computation graph. It indicates that backprop after unchaining stops at\n this variable. This behavior is useful to implement truncated BPTT.\n\n \"\"\"\n cand_funcs = []\n seen_set = set()\n\n def add_cand(cand):\n if cand is not None and cand not in seen_set:\n cand_funcs.append(cand)\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n while cand_funcs:\n func = cand_funcs.pop()\n for var in func.inputs:\n add_cand(var.creator_node)\n func.unchain()\n\n def retain_data(self):\n \"\"\"Lets the corresponding variable node keep the underlying array.\"\"\"\n self._node.data = self._data[0]\n\n def __lt__(self, other):\n raise NotImplementedError()\n\n def __le__(self, other):\n raise NotImplementedError()\n\n def __eq__(self, other):\n raise NotImplementedError()\n\n def __ne__(self, other):\n raise NotImplementedError()\n\n def __gt__(self, other):\n raise NotImplementedError()\n\n def __ge__(self, other):\n raise NotImplementedError()\n\n def __nonzero__(self):\n raise NotImplementedError()\n\n def __bool__(self):\n raise NotImplementedError()\n\n def __hash__(self):\n return super(Variable, self).__hash__()\n\n __array_priority__ = 200\n\n\nclass Parameter(Variable):\n\n \"\"\"Parameter variable that can be registered to a link.\n\n Parameter is a subclass of :class:`Variable`. It almost behaves as same\n as a usual variable except that a parameter can be registered to a\n :class:`~chainer.Link` object just by assigning it to an attribute of\n the link within an :meth:`~chainer.Link.init_scope` context.\n\n Parameter also supports an initialization by an initializer. It can have\n two initializers: one for the data array, and the other for the gradient\n array. The initializer only specifies the way of filling the elements of\n these arrays, and the shape information is specified at the initialization\n point.\n\n When a link that the parameter has been registered to is passed to an\n :class:`~chainer.GradientMethod`, an update rule is set to the parameter.\n This update rule specifies how to update the data array of the parameter\n using its gradient array.\n\n Args:\n initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray):\n Initializer of the data array. If ``shape`` is given, this\n initializer is immediately used to initialize the data array.\n Otherwise, if it is an array, it is immediately used as the data\n array, and otherwise the data array is left uninitialized and will\n be initialized by this initializer in :meth:`initialize`. It can\n also be a scalar, in which case the data array will be filled by\n this scalar. Note that float32 is used in this case.\n shape (int or tuple of int or None): Shape of the parameter. If it is\n ``None``, the initialization is deferred to the call of\n :meth:`initialize`.\n name (str): Name of the parameter.\n\n Attributes:\n initializer: Initializer of the data array. 
It is used for\n initializing the data array of an uninitialized variable.\n update_rule: :class:`~chainer.optimizer.UpdateRule` instance that\n updates this variable as a parameter. This argument is set to\n :attr:`update_rule`.\n\n \"\"\"\n\n initializer = None\n _grad_initializer = None\n _initial_device = None\n\n def __init__(self, initializer=None, shape=None, name=None):\n if initializer is None:\n initializer = constant.NaN()\n elif numpy.isscalar(initializer):\n initializer = constant.Constant(initializer)\n if shape is None:\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n # parameter initialized by the initial array\n super(Parameter, self).__init__(initializer, name=name)\n else:\n # uninitialized parameter\n super(Parameter, self).__init__(name=name)\n self.initializer = initializer\n dtype = getattr(initializer, 'dtype', numpy.float32)\n self._grad_initializer = constant.NaN(dtype)\n else:\n # parameter initialized with a given shape\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n xp = cuda.get_array_module(initializer)\n initializer = constant.Constant(initializer)\n else:\n xp = numpy\n data = initializers.generate_array(initializer, shape, xp)\n grad = xp.full_like(data, numpy.nan)\n super(Parameter, self).__init__(data, name=name, grad=grad)\n\n self.update_rule = None\n\n def __copy__(self):\n return self._copy_to(Parameter())\n\n def __reduce__(self):\n return _recover_parameter, (self.data, self.name, self.grad,\n self.initializer, self.update_rule)\n\n def to_cpu(self):\n super(Parameter, self).to_cpu()\n if self.data is None:\n self._initial_device = None\n\n def to_gpu(self, device=None):\n super(Parameter, self).to_gpu(device)\n if self.data is None:\n if device is None:\n device = cuda.Device().id\n self._initial_device = device\n\n def cleargrad(self):\n super(Parameter, self).cleargrad()\n if self.data is None:\n self._grad_initializer = None\n\n def zerograd(self):\n super(Parameter, self).zerograd()\n if self.data is None:\n dtype = getattr(self.initializer, 'dtype', None)\n self._grad_initializer = initializers.Zero(dtype)\n\n def initialize(self, shape):\n \"\"\"Initializes the uninitialized variable.\n\n Uninitialized variable is a variable created with the data array set to\n None. This method creates and initializes the data array. The shape of\n the variable can be left unknown until this method is called.\n\n Args:\n shape (tuple of int): Shape of the data array.\n\n \"\"\"\n xp = numpy if self._initial_device is None else cuda.cupy\n with cuda.get_device_from_id(self._initial_device):\n data = initializers.generate_array(self.initializer, shape, xp)\n\n ginit = self._grad_initializer\n grad = None if ginit is None else initializers.generate_array(\n ginit, shape, xp)\n\n self._data[0] = data\n self.grad = grad\n\n def update(self):\n \"\"\"Updates the data array using the gradient and the update rule.\n\n This method updates the parameter using the attached update rule.\n\n \"\"\"\n if self.update_rule is not None:\n self.update_rule.update(self)\n\n\ndef _recover_parameter(data, name, grad, initializer, update_rule):\n p = Parameter(initializer=initializer, name=name)\n p.data = data\n p.grad = grad\n p.update_rule = update_rule\n return p\n", "path": "chainer/variable.py" } ]
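The `backward` method in the `chainer/variable.py` snapshot above walks the function graph with a priority queue: each candidate function node is pushed with its rank negated so that `heapq` (a min-heap) pops the deepest nodes first. Below is a minimal standalone sketch of that traversal pattern, using hypothetical `FuncNode` stand-ins rather than Chainer's real classes.

```python
import heapq

class FuncNode:
    """Hypothetical stand-in for a function node carrying a topological rank."""
    def __init__(self, name, rank, inputs=()):
        self.name = name
        self.rank = rank
        self.inputs = inputs  # creator nodes of this node's input variables

def traverse_backward(output_creator):
    """Visit function nodes in decreasing rank, mirroring the heap pattern in
    Variable.backward (ranks are negated because heapq is a min-heap)."""
    cand_funcs = []
    seen = set()

    def add_cand(cand):
        if cand not in seen:
            # len(seen) is a tie-breaking counter so FuncNode objects are never compared
            heapq.heappush(cand_funcs, (-cand.rank, len(seen), cand))
            seen.add(cand)

    add_cand(output_creator)
    order = []
    while cand_funcs:
        _, _, func = heapq.heappop(cand_funcs)
        order.append(func.name)
        for creator in func.inputs:
            add_cand(creator)
    return order

# Toy graph: f0 feeds f1 feeds f2, and f2 created the loss variable.
f0 = FuncNode('f0', rank=0)
f1 = FuncNode('f1', rank=1, inputs=(f0,))
f2 = FuncNode('f2', rank=2, inputs=(f1,))
assert traverse_backward(f2) == ['f2', 'f1', 'f0']
```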
[ { "content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(func, x, gx):\n if x.data is None or gx is None:\n # ``x.data is None`` implies that the data array is not retained\n return\n if not isinstance(gx, type(x.data)):\n msg = ('Type of data and grad mismatch\\n%s != %s' %\n (type(x.data), type(gx)))\n typ = TypeError\n elif gx.dtype != x.data.dtype:\n msg = ('Dtype of data and grad mismatch\\n%s != %s' %\n (x.data.dtype, gx.dtype))\n typ = TypeError\n elif gx.shape != x.data.shape:\n msg = ('Shape of data and grad mismatch\\n%s != %s' %\n (x.data.shape, gx.shape))\n typ = ValueError\n else:\n return\n\n detail = ''\n if func:\n detail = 'Function `{0}` ({1}) has a bug.\\n'.format(\n type(func)._impl_name, func.label)\n stack = func.stack\n if stack:\n detail += 'Stacktrace of the function is below:\\n'\n for line in traceback.format_list(func.stack):\n detail += line\n detail += '''\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''.format(type(func).__name__, func.label)\n\n raise typ(detail + msg)\n\n\ndef variable_repr(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_repr\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n elif arr.size > 0 or arr.shape == (0,):\n lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')\n else: # show zero-length shape unless it is (0,)\n lst = '[], shape=%s' % (repr(arr.shape),)\n\n return '%s(%s)' % (prefix, lst)\n\n\ndef variable_str(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_str\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n else:\n lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')\n\n return '%s(%s)' % (prefix, lst)\n\n\nclass VariableNode(object):\n\n \"\"\"Node in the backward computational graph representing a variable.\n\n This object represents a variable node in a computational graph. The node\n is used in error backpropagation (a.k.a. backprop) to determine which\n gradient to be passed to each function.\n\n A variable node is held by the corresponding :class:`Variable` object,\n which is managed by users. :class:`Function` objects that take the variable\n as an input also hold references to the variable node.\n\n Note that the node does not hold a reference to the corresponding data\n array in general. The data array is actually accessible by the node in the\n following cases.\n\n 1. If there exists a :class:`Variable` object that holds a reference to the\n variable node, the variable node holds a weak reference to the variable\n object, and thus the data array is accessible via the weak reference.\n 2. If :meth:`retain_data` is called, the node holds a reference to the data\n array. 
It is mainly called by a function that needs the input or output\n data array in its backprop procedure. See :meth:`Function.retain_inputs`\n and :meth:`Function.retain_outputs` for more details.\n\n Users usually do not need to touch this variable node object. The\n computational graph is automatically managed by Chainer, and any interface\n that is beneficial for users is also provided by :class:`Variable`.\n\n Args:\n variable (Variable): The corresponding variable object.\n name (str): Name of the variable node.\n\n Attributes:\n ~VariableNode.dtype: Data type of the data array.\n ~VariableNode.shape: Shape of the data array.\n ~VariableNode.name (str): Name of the variable node.\n\n \"\"\"\n\n _creator_node = None\n _data = None\n _rank = 0\n # Name of the Function is assigned if this variable is a gradient generated\n # by an old-style Function\n _old_style_grad_generator = None\n\n def __init__(self, variable, name, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs,\n grad='unexpected keyword argument \"grad\": '\n 'pass the gradient to Variable instead'\n )\n self._variable = weakref.ref(variable)\n self.name = name\n self._requires_grad = variable.requires_grad\n\n vdata = variable.data\n self._set_data_type(vdata)\n\n @property\n def creator(self):\n \"\"\"Function object that created this variable node.\n\n When the function is implemented with the old-style API (i.e., it uses\n :class:`Function` class), this property returns the :class:`Function`\n object. The object is extracted from the :class:`FunctionAdapter`\n object, so the returned object is not the function node, but instead\n the actual implementation of forward and backward procedures.\n\n When the function is implemented with the new-style API (i.e., it uses\n :class:`FunctionNode` class), this property returns the function node\n object. In this case, the returned object is same as\n :attr:`creator_node`.\n\n .. warning::\n\n As of v3.0.0, when the creator is an old-style function, the\n following code is invalid:\n\n .. code-block:: python\n\n creator = v.creator\n v.creator = None\n ...\n v.creator = creator\n\n The point is that :class:`FunctionNode` objects are used as nodes\n in the computational graph instead of :class:`Function`, and each\n :class:`Function` object only holds a *weak reference* to the\n corresponding :class:`FunctionNode`. Since ``creator`` returns the\n :class:`Function` object, the :class:`FunctionNode` object is not\n kept by preserving ``creator``.\n\n The above code should be fixed as follows.\n\n .. 
code-block:: python\n\n creator_node = v.creator_node\n v.creator_node = None\n ...\n v.creator_node = creator_node\n\n \"\"\"\n node = self._creator_node\n if node is None:\n return None\n\n if isinstance(node, chainer.function.FunctionAdapter):\n return node.function\n return node\n\n @creator.setter\n def creator(self, func):\n self.creator_node = func\n\n @property\n def creator_node(self):\n \"\"\"Function node that has this variable as an output.\n\n See :class:`FunctionNode` for the definition of a function node.\n\n \"\"\"\n return self._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n if isinstance(func, chainer.Function):\n func = func.node\n self._creator_node = func\n if func is not None:\n self._rank = func.rank + 1\n\n @property\n def data(self):\n \"\"\"Data array of the corresponding variable.\n\n If the data is not available, it returns ``None``.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, d):\n self._data = d\n self._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of the corresponding variable.\n\n If the variable is not available, it returns ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var.grad\n\n @property\n def grad_var(self):\n \"\"\"Gradient variable of the corresponding variable.\n\n If the corresponding variable is not available, it return ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var._grad_var\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable node.\"\"\"\n if self.shape == ():\n return str(self.dtype)\n return '(%s), %s' % (', '.join(map(str, self.shape)),\n str(self.dtype))\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def get_variable(self):\n \"\"\"Returns the corresponding :class:`Variable` object.\n\n VariableNode object holds a weak reference of the variable object. If\n the reference is alive, it is returned by this property. Otherwise,\n this property creates a new :class:`Variable` object from this node\n object and returns it.\n\n Returns:\n Variable: The variable object that refers this node.\n\n \"\"\"\n var = self._variable()\n if var is not None:\n return var\n\n var = Variable(self.data, name=self.name,\n requires_grad=self._requires_grad)\n var._node = self\n return var\n\n def set_creator(self, creator):\n \"\"\"Sets a :class:`Function` object that created this node.\n\n This method is equivalent to ``self.creator = creator``. A\n :class:`FunctionNode` object can also be passed.\n\n Args:\n creator (Function or FunctionNode): Function that has created this\n variable.\n\n \"\"\"\n self.creator = creator\n\n def set_creator_node(self, creator_node):\n \"\"\"Sets a :class:`FunctionNode` object that created this node.\n\n This method is equivalent to ``self.creator_node = creator_node``. 
A\n :class:`Function` object can also be passed, in which case the\n :attr:`~Function.node` object is extracted.\n\n Args:\n creator_node (FunctionNode or Function): Function node that has\n this variable as an output.\n\n \"\"\"\n self.creator_node = creator_node\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable node.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def retain_data(self):\n \"\"\"Lets the node hold a reference to the underlying data array.\n\n This method gets the data array of the corresponding variable and keeps\n it. If the weak reference to the corresponding variable is dead, it\n raises an error.\n\n \"\"\"\n variable = self._variable()\n if variable is not None:\n self.data = variable.data\n else:\n raise RuntimeError('cannot retain variable data: the variable has '\n 'been already released')\n\n def _set_data_type(self, d):\n if d is None:\n self.dtype = None\n self.shape = None\n else:\n self.dtype = d.dtype\n self.shape = d.shape\n\n def _check_old_style_gradient(self):\n if self._old_style_grad_generator is not None:\n raise RuntimeError(\n 'cannot twice-differentiate an old style Function \"%s\"' %\n self._old_style_grad_generator)\n\n\ndef _create_variable(data, name, grad, requires_grad):\n return Variable(\n data, name=name, grad=grad, requires_grad=requires_grad)\n\n\nclass Variable(object):\n\n \"\"\"__init__(data=None, *, name=None, grad=None, requires_grad=True)\n\n Array with a structure to keep track of computation.\n\n Every variable holds a data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`.\n\n A variable object holds a data array and a :class:`VariableNode` object of\n a computational graph. If the variable is constructed by the user, the node\n is *root* and does not hold any parent. If the variable is constructed by a\n :class:`FunctionNode` object, the node holds a reference to its parent\n called :attr:`creator_node`. This reference is used in backpropagation to\n backtrack the graph.\n\n Users can disable (resp. enable) this chaining behavior by calling\n :func:`~chainer.no_backprop_mode` (resp.\n :func:`~chainer.force_backprop_mode`).\n In the former context, a variable never creates a computational graph,\n whereas in the latter context, it is forced to create.\n\n .. warning::\n\n ``volatile`` argument is not supported anymore since v2.\n Instead, use :func:`chainer.no_backprop_mode`.\n\n Args:\n data (numpy.ndarray or cupy.ndarray): Initial data array.\n name (str): Name of the variable.\n grad (numpy.ndarray or cupy.ndarray): Initial gradient array.\n requires_grad (bool): Boolean indicating whether ``grad`` will be set\n in backward calculation.\n\n Attributes:\n ~Variable.data: Data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`. If it is None, the variable is left in an\n uninitialized state.\n ~Variable.grad_var (Variable): Gradient variable.\n\n \"\"\" # NOQA\n\n def __init__(self, data=None, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs, volatile='volatile argument is not supported anymore. 
'\n 'Use chainer.using_config')\n name, grad, requires_grad \\\n = argument.parse_kwargs(\n kwargs, ('name', None), ('grad', None),\n ('requires_grad', True))\n\n if (data is not None and\n not isinstance(data, (numpy.ndarray, cuda.ndarray))):\n msg = '''numpy.ndarray or cuda.ndarray are expected.\nActual: {0}'''.format(type(data))\n raise TypeError(msg)\n\n # Use a list as a data structure to hold the data array indirectly to\n # abstract its initialized/uninitialized state.\n self._data = [data]\n self._requires_grad = requires_grad\n self._node = VariableNode(self, name)\n self._grad_var = None if grad is None else Variable(grad)\n\n def __copy__(self):\n return self._copy_to(Variable())\n\n def _copy_to(self, target):\n target.__dict__ = copy.copy(self.__dict__)\n target._node = VariableNode(target, self.name)\n return target\n\n def __reduce__(self):\n return _create_variable, (self.data, self.name, self.grad,\n self._requires_grad)\n\n def __repr__(self):\n return variable_repr(self)\n\n def __str__(self):\n return variable_str(self)\n\n @property\n def name(self):\n return self._node.name\n\n @name.setter\n def name(self, n):\n self._node.name = n\n\n def summary(self):\n if self.name:\n return '<variable %s>' % self.name\n else:\n return '<variable at 0x%x>' % id(self)\n\n def debug_print(self):\n \"\"\"Display a summary of the stored data and location of the Variable\"\"\"\n\n msg = \"\"\"{summary}\n- device: {device}\n- backend: {background}\n- shape: {shape}\n- dtype: {dtype}\n- statistics: {stats}\n- grad: {grad}\"\"\"\n\n stats_msg = 'mean={0:.8f}, std={1:.8f}'\n\n try:\n device = self.data.device\n except AttributeError:\n device = 'CPU'\n\n with cuda.get_device_from_array(self.data) as dev:\n xp = numpy if int(dev) == -1 else cuda.cupy\n\n if self.grad is None:\n grad = None\n elif xp.all(self.grad == 0):\n grad = 0\n else:\n grad = stats_msg.format(float(xp.mean(self.grad)),\n float(xp.std(self.grad)))\n\n stats = stats_msg.format(float(xp.mean(self.data)),\n float(xp.std(self.data)))\n\n return msg.format(summary=self.summary(),\n grad=grad, shape=self.data.shape,\n background=type(self.data),\n dtype=self.data.dtype, device=device,\n stats=stats)\n\n def __pos__(self):\n return self\n\n def __len__(self):\n \"\"\"Returns the first dimension of the data array.\n\n Returns:\n int: Number of the first dimension of the data array.\n\n \"\"\"\n return len(self.data)\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable.\"\"\"\n return self._node.label\n\n @property\n def creator(self):\n \"\"\"Function implementation that created this variable.\n\n When this variable has been created by an old-style function (i.e., it\n is implemented as a subclass of :class:`Function`), this property\n returns that :class:`Function` object.\n\n When this variable has been created by a new-style function (i.e., it\n is implemented as a subclass of :class:`FunctionNode` class), this\n property returns that node object.\n\n \"\"\"\n return self._node.creator\n\n @creator.setter\n def creator(self, func):\n self._node.creator = func\n\n @property\n def creator_node(self):\n \"\"\":class:`FunctionNode` object that created this variable.\n\n This property has a setter to which ``None`` can be set. Setting\n ``None`` to this property is equivalent to call :meth:`unchain`;\n it purges the variable from the function that created this variable.\n\n The setter also accepts the original :class:`FunctionNode` object that\n created this variable. 
For example, you can once set ``None`` to this\n property and then set the original value again.\n\n .. note::\n Setting an irrelevant :meth:`FunctionNode` object does not emit any\n error immediately, whereas the behavior is undefined. Do not set\n a :meth:`FunctionNode` object that did not create this variable\n object.\n\n \"\"\"\n return self._node._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n self._node.creator_node = func\n\n @property\n def data(self):\n return self._data[0]\n\n @data.setter\n def data(self, d):\n self._data[0] = d\n self._node._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of this variable.\n\n Not that this property returns the underlying array of the gradient\n variable instead of the gradient variable itself; to get/set\n gradient variable, use :attr:`grad_var` instead.\n\n \"\"\"\n gv = self._grad_var\n return None if gv is None else gv.data\n\n @grad.setter\n def grad(self, g):\n self.grad_var = None if g is None else Variable(g)\n\n @property\n def grad_var(self):\n return self._grad_var\n\n @grad_var.setter\n def grad_var(self, g):\n if g is not None:\n _check_grad_type(None, self, g.data)\n self._grad_var = g\n\n @property\n def shape(self):\n return self.data.shape\n\n @property\n def ndim(self):\n return self.data.ndim\n\n @property\n def size(self):\n return self.data.size\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @property\n def rank(self):\n return self._node.rank\n\n @property\n def node(self):\n return self._node\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def to_cpu(self):\n \"\"\"Copies the data and gradient arrays to CPU.\"\"\"\n if self.data is None:\n return\n\n self._data = [cuda.to_cpu(self.data)]\n if self._grad_var is not None:\n self._grad_var.to_cpu()\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def to_gpu(self, device=None):\n \"\"\"Copies the data and gradient arrays to specified GPU.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n \"\"\"\n if self.data is None:\n self._initial_device = (cuda.Device().id\n if device is None else device)\n else:\n self._data = [cuda.to_gpu(self.data, device)]\n if self._grad_var is not None:\n self._grad_var.to_gpu(device)\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def cleargrad(self):\n \"\"\"Clears the gradient array.\"\"\"\n self._grad_var = None\n\n def zerograd(self):\n \"\"\"Initializes the gradient array by zeros.\n\n Note that the gradient variable is unchained from the computational\n graph by this method because this operation breaks the backprop\n validity.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrad` instead.\n\n \"\"\"\n warnings.warn(\n 'Variable.zerograd is deprecated. 
Use Variable.cleargrad instead.',\n DeprecationWarning)\n\n if self.data is None:\n return\n\n with cuda.get_device_from_array(self.data) as dev:\n gv = self._grad_var\n if gv is None:\n xp = numpy if dev.id == -1 else cuda.cupy\n self.grad = xp.zeros_like(self.data)\n else:\n gv.unchain()\n gv.data.fill(0)\n\n def copydata(self, var):\n \"\"\"Copies the data array from given source variable.\n\n This method copies the data array from given variable to this variable.\n The copy is done even if the arrays reside on different devices,\n including across the host and a GPU device. If this variable has an\n uninitialized data array, this method initializes it by the data array\n of the given variable. Similarly, if the given variable has an\n uninitialized data array, this method initializes it by the data array\n of this variable (``self``). If both are uninitialized, this method\n does nothing.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var.data\n dst = self.data\n if src is None:\n if dst is None:\n return\n var.initialize(self.shape)\n src = var.data\n elif dst is None:\n self.initialize(src.shape)\n dst = self.data\n src_xp = cuda.get_array_module(src)\n dst_xp = cuda.get_array_module(dst)\n if dst_xp is src_xp:\n dst_xp.copyto(dst, src)\n elif dst_xp is numpy:\n dst_xp.copyto(dst, src.get())\n else:\n dst.set(src)\n\n def addgrad(self, var):\n \"\"\"Accumulates the gradient array from given source variable.\n\n This method adds the gradient of a given variable to the gradient of\n this variable. The accumulation is even done across the host and\n different devices. If this variable has uninitialized data/grad arrays,\n this method initializes it with the shape of the given variable and\n then accumulates the gradient.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var._grad_var\n if src is None:\n return\n\n if self.data is None:\n self.initialize(var.shape)\n dst = self._grad_var\n\n src_dev = cuda.get_device_from_array(src.data)\n dst_dev = cuda.get_device_from_array(self.data)\n\n if src_dev.id != dst_dev.id:\n src = chainer.functions.copy(src, dst_dev.id)\n self._grad_var = src if dst is None else src + dst\n\n def set_creator(self, gen_func):\n \"\"\"Notifies the variable that the given function is its creator.\n\n Args:\n gen_func (Function): Function object that creates this variable as\n one of its outputs.\n\n \"\"\"\n self._node.set_creator(gen_func)\n\n def set_creator_node(self, fnode):\n \"\"\"Notifies the variable that the given node is its creator.\n\n Args:\n fnode (FunctionNode): Function node that has this variable as an\n output.\n\n \"\"\"\n self._node.set_creator_node(fnode)\n\n def backward(self, retain_grad=False):\n \"\"\"Runs error backpropagation (a.k.a. backprop) from this variable.\n\n On backprop, :meth:`FunctionNode.backward` is called on each\n :class:`FunctionNode` object appearing in the backward graph starting\n from this variable. The backward graph is represented by backward\n references from variable nodes to their creators, and from function\n nodes to their input variable nodes. The backprop stops at all root\n nodes. Some function nodes set ``None`` as gradients of some inputs,\n where further backprop does not take place at such inputs.\n\n This method uses :data:`grad` as the initial error array. User can\n manually set a gradient array before calling this method. 
If\n :data:`data` contains only one element (i.e., it is scalar) and\n :data:`grad` is ``None``, then this method automatically complements\n 1.0 as the initial error. This is useful on starting backprop from\n some scalar loss value.\n\n Note that this method does not support *differentiable backprop*. Use\n :func:`grad` to compute the gradient of gradients.\n\n Args:\n retain_grad (bool): If ``True``, the gradient arrays of all\n intermediate variables are kept. Otherwise, :data:`grad` of the\n intermediate variables are set to ``None`` on appropriate\n timing, which may reduce the maximum memory consumption.\n\n In most cases of training some models, the purpose of backprop\n is to compute gradients of parameters, not of all variables,\n and therefore it is recommended to set this flag ``False``.\n\n \"\"\"\n self._node._check_old_style_gradient()\n if self.creator_node is None:\n return\n initial_device = None\n if cuda.available and isinstance(self.data, cuda.cupy.ndarray):\n try:\n initial_device = cuda.Device()\n except cuda.cupy.cuda.runtime.CUDARuntimeError as e:\n if e.status != 38: # cudaErrorNoDevice\n raise\n\n is_debug = chainer.is_debug()\n\n cand_funcs = []\n seen_set = set()\n grads = {}\n\n # Initialize error by 1, if this is a loss variable\n if self.data.size == 1 and self._grad_var is None:\n with cuda.get_device_from_array(self.data) as device:\n if device is cuda.DummyDevice:\n self.grad = numpy.ones_like(self.data)\n else:\n self.grad = cuda.cupy.ones_like(self.data)\n grads[self._node] = self._grad_var\n\n def add_cand(cand):\n if cand not in seen_set:\n # Negate since heapq is min-heap\n heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n def get_grad(node):\n if node is None:\n return None\n if node in grads:\n return grads[node]\n return node.grad_var\n\n while cand_funcs:\n _, _, func = heapq.heappop(cand_funcs)\n inputs = func.inputs\n outputs = [y() for y in func.outputs] # access via weak ref\n\n in_data = tuple([x.data for x in inputs])\n out_grad = tuple([get_grad(y) for y in outputs])\n out_grad_data = tuple(\n [None if g is None else g.data for g in out_grad])\n hooks = chainer.get_function_hooks()\n if func._n_local_function_hooks != 0:\n hooks = collections.OrderedDict(hooks)\n hooks.update(func.local_function_hooks)\n hooks = hooks.values() # avoid six for performance\n\n cuda.get_device_from_array(*in_data).use()\n for hook in hooks:\n hook.backward_preprocess(func, in_data, out_grad_data)\n\n # Collect the current input gradients.\n #\n # Note (Tokui): When the same variable is passed to multiple input\n # slots (e.g. an expression like ``f(x, x)``), it makes the\n # gradient accumulation complicated since the back-propagated\n # gradients w.r.t. the first and second argument should be\n # accumulated to the current gradient w.r.t. 
the same variable.\n # In this case, the current implementation passes the current\n # gradient only to the first occurrence of the variable in the\n # input tuple and passes ``None`` to the rest of the occurrences.\n # For example, when the input variables are ``(x, x)``, the\n # input gradient passed to the ``backward_accumulate`` method is\n # ``(gx, None)`` where ``gx`` is the current gradient of ``x``.\n # See also the docstring of ``FunctionNode.backward_accumulate``.\n target_input_indexes = [\n i for i, x in enumerate(inputs) if x.requires_grad\n ]\n target_inputs = [inputs[i] for i in target_input_indexes]\n in_grad = []\n for i, index_i in enumerate(target_input_indexes):\n x = inputs[index_i]\n if x in target_inputs[:i]:\n # Pass ``None`` for duplicated input variables except for\n # the first occurrence (see the comment above).\n gx = None\n elif x in grads:\n gx = grads[x]\n elif x.creator_node is None:\n x._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n gx = x.grad_var\n else:\n gx = None\n in_grad.append(gx)\n\n gxs = func.backward_accumulate(\n target_input_indexes, out_grad, in_grad)\n\n assert len(gxs) == len(in_grad)\n for hook in hooks:\n hook.backward_postprocess(func, in_data, out_grad_data)\n\n if is_debug:\n for gx in gxs:\n if gx is None:\n continue\n gx_data = gx.data\n cuda.get_device_from_array(gx_data).use()\n if cuda.get_array_module(gx_data).isnan(gx_data).any():\n msg = ('NaN is detected on backward computation of '\n '{}'.format(func.label))\n raise RuntimeError(msg)\n\n if not retain_grad:\n for y in outputs:\n if y is not None and y is not self.node:\n grads[y] = None\n y_var = y.get_variable()\n if y_var is not None:\n y_var._grad_var = None\n\n for i, gx in enumerate(gxs):\n if gx is None:\n continue\n\n x = target_inputs[i]\n if not x.requires_grad:\n continue\n\n _check_grad_type(func, x, gx.data)\n\n if x in target_inputs[:i]:\n # Accumulate the duplicated gradients here. See the comment\n # above the code that builds ``in_grad``.\n cur_gx = grads[x]\n grads[x] = gx if cur_gx is None else gx + cur_gx\n else:\n grads[x] = gx\n\n x_var = x.get_variable()\n if x_var is not None:\n x_var._grad_var = grads[x]\n\n if x.creator_node is not None:\n add_cand(x.creator_node)\n\n del gxs # to reduce memory usage\n if initial_device is not None:\n initial_device.use()\n\n def reshape(self, *shape):\n \"\"\"Returns a variable of a different shape and the same content.\n\n .. seealso::\n :func:`chainer.functions.reshape` for full documentation,\n\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (tuple, list)):\n shape = shape[0]\n return chainer.functions.reshape(self, shape)\n\n def transpose(self, *axes):\n \"\"\"Permute the dimensions of an input variable without copy.\n\n .. seealso::\n :func:`chainer.functions.transpose` for full documentation.\n\n \"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or\n axes[0] is None):\n axes = axes[0]\n return chainer.functions.transpose(self, axes)\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable.\n\n This method deletes the reference to the creator from the corresponding\n variable node. 
Unlike :meth:`unchain_backward`, it does not backtrack\n the graph.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def unchain_backward(self):\n \"\"\"Deletes references between variable nodes and functions backward.\n\n After this method completes, intermediate variable nodes and functions\n that are not referenced from anywhere are deallocated by reference\n count GC. Also this variable itself deletes the reference to its\n creator function from the node, i.e. the node becomes root in the\n computation graph. It indicates that backprop after unchaining stops at\n this variable. This behavior is useful to implement truncated BPTT.\n\n \"\"\"\n cand_funcs = []\n seen_set = set()\n\n def add_cand(cand):\n if cand is not None and cand not in seen_set:\n cand_funcs.append(cand)\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n while cand_funcs:\n func = cand_funcs.pop()\n for var in func.inputs:\n add_cand(var.creator_node)\n func.unchain()\n\n def retain_data(self):\n \"\"\"Lets the corresponding variable node keep the underlying array.\"\"\"\n self._node.data = self._data[0]\n\n def __lt__(self, other):\n raise NotImplementedError()\n\n def __le__(self, other):\n raise NotImplementedError()\n\n def __eq__(self, other):\n raise NotImplementedError()\n\n def __ne__(self, other):\n raise NotImplementedError()\n\n def __gt__(self, other):\n raise NotImplementedError()\n\n def __ge__(self, other):\n raise NotImplementedError()\n\n def __nonzero__(self):\n raise NotImplementedError()\n\n def __bool__(self):\n raise NotImplementedError()\n\n def __hash__(self):\n return super(Variable, self).__hash__()\n\n __array_priority__ = 200\n\n\nclass Parameter(Variable):\n\n \"\"\"Parameter variable that can be registered to a link.\n\n Parameter is a subclass of :class:`Variable`. It almost behaves as same\n as a usual variable except that a parameter can be registered to a\n :class:`~chainer.Link` object just by assigning it to an attribute of\n the link within an :meth:`~chainer.Link.init_scope` context.\n\n Parameter also supports an initialization by an initializer. It can have\n two initializers: one for the data array, and the other for the gradient\n array. The initializer only specifies the way of filling the elements of\n these arrays, and the shape information is specified at the initialization\n point.\n\n When a link that the parameter has been registered to is passed to an\n :class:`~chainer.GradientMethod`, an update rule is set to the parameter.\n This update rule specifies how to update the data array of the parameter\n using its gradient array.\n\n Args:\n initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray):\n Initializer of the data array. If ``shape`` is given, this\n initializer is immediately used to initialize the data array.\n Otherwise, if it is an array, it is immediately used as the data\n array, and otherwise the data array is left uninitialized and will\n be initialized by this initializer in :meth:`initialize`. It can\n also be a scalar, in which case the data array will be filled by\n this scalar. Note that float32 is used in this case.\n shape (int or tuple of int or None): Shape of the parameter. If it is\n ``None``, the initialization is deferred to the call of\n :meth:`initialize`.\n name (str): Name of the parameter.\n\n Attributes:\n initializer: Initializer of the data array. 
It is used for\n initializing the data array of an uninitialized variable.\n update_rule: :class:`~chainer.optimizer.UpdateRule` instance that\n updates this variable as a parameter. This argument is set to\n :attr:`update_rule`.\n\n \"\"\"\n\n initializer = None\n _grad_initializer = None\n _initial_device = None\n\n def __init__(self, initializer=None, shape=None, name=None):\n if initializer is None:\n initializer = constant.NaN()\n elif numpy.isscalar(initializer):\n initializer = constant.Constant(initializer)\n if shape is None:\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n # parameter initialized by the initial array\n super(Parameter, self).__init__(initializer, name=name)\n else:\n # uninitialized parameter\n super(Parameter, self).__init__(name=name)\n self.initializer = initializer\n dtype = getattr(initializer, 'dtype', numpy.float32)\n self._grad_initializer = constant.NaN(dtype)\n else:\n # parameter initialized with a given shape\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n xp = cuda.get_array_module(initializer)\n initializer = constant.Constant(initializer)\n else:\n xp = numpy\n data = initializers.generate_array(initializer, shape, xp)\n grad = xp.full_like(data, numpy.nan)\n super(Parameter, self).__init__(data, name=name, grad=grad)\n\n self.update_rule = None\n\n def __copy__(self):\n return self._copy_to(Parameter())\n\n def __reduce__(self):\n return _recover_parameter, (self.data, self.name, self.grad,\n self.initializer, self.update_rule)\n\n def to_cpu(self):\n super(Parameter, self).to_cpu()\n if self.data is None:\n self._initial_device = None\n\n def to_gpu(self, device=None):\n super(Parameter, self).to_gpu(device)\n if self.data is None:\n if device is None:\n device = cuda.Device().id\n self._initial_device = device\n\n def cleargrad(self):\n super(Parameter, self).cleargrad()\n if self.data is None:\n self._grad_initializer = None\n\n def zerograd(self):\n super(Parameter, self).zerograd()\n if self.data is None:\n dtype = getattr(self.initializer, 'dtype', None)\n self._grad_initializer = initializers.Zero(dtype)\n\n def initialize(self, shape):\n \"\"\"Initializes the uninitialized variable.\n\n Uninitialized variable is a variable created with the data array set to\n None. This method creates and initializes the data array. The shape of\n the variable can be left unknown until this method is called.\n\n Args:\n shape (tuple of int): Shape of the data array.\n\n \"\"\"\n xp = numpy if self._initial_device is None else cuda.cupy\n with cuda.get_device_from_id(self._initial_device):\n data = initializers.generate_array(self.initializer, shape, xp)\n\n ginit = self._grad_initializer\n grad = None if ginit is None else initializers.generate_array(\n ginit, shape, xp)\n\n self.data = data\n self.grad = grad\n\n def update(self):\n \"\"\"Updates the data array using the gradient and the update rule.\n\n This method updates the parameter using the attached update rule.\n\n \"\"\"\n if self.update_rule is not None:\n self.update_rule.update(self)\n\n\ndef _recover_parameter(data, name, grad, initializer, update_rule):\n p = Parameter(initializer=initializer, name=name)\n p.data = data\n p.grad = grad\n p.update_rule = update_rule\n return p\n", "path": "chainer/variable.py" } ]
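The comments inside `backward` above explain that when one variable fills several input slots (an expression like `f(x, x)`), only the first occurrence receives the current gradient and the duplicated occurrences are summed back in afterwards. Here is a tiny numeric check of why that accumulation is needed, written with plain NumPy rather than the Chainer API.

```python
import numpy as np

# For y = f(x, x) with f(a, b) = a * b, the chain rule gives
# dy/dx = df/da + df/db evaluated at a = b = x, i.e. x + x = 2 * x.
x = np.array(3.0)
ga = x            # gradient flowing into the first input slot
gb = x            # gradient flowing into the second input slot
gx = ga + gb      # what the duplicate-accumulation branch effectively computes
assert gx == 2 * x
```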
diff --git a/chainer/variable.py b/chainer/variable.py index e0050d20c3ac..680b810d3aa1 100644 --- a/chainer/variable.py +++ b/chainer/variable.py @@ -1189,7 +1189,7 @@ def initialize(self, shape): grad = None if ginit is None else initializers.generate_array( ginit, shape, xp) - self._data[0] = data + self.data = data self.grad = grad def update(self): diff --git a/tests/chainer_tests/test_variable.py b/tests/chainer_tests/test_variable.py index 75b9fa1511e7..feb66a15914f 100644 --- a/tests/chainer_tests/test_variable.py +++ b/tests/chainer_tests/test_variable.py @@ -757,6 +757,13 @@ def test_initialize_dtype(self): self.assertEqual(x.data.dtype, np.float64) self.assertEqual(x.grad.dtype, np.float64) + def test_initialize_node(self): + initializer = initializers.Zero(np.float64) + x = chainer.Parameter(initializer=initializer) + x.initialize((2, 3)) + self.assertEqual(x.node.shape, (2, 3)) + self.assertEqual(x.node.dtype, np.float64) + @attr.gpu def test_initialize_to_gpu(self): x = chainer.Parameter(initializer=initializers.Constant(self.a))
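The one-line change in the diff above routes the freshly generated array through the `data` property instead of assigning to `self._data[0]` directly; as the `Variable.data` setter in the file shows, the property also calls `_set_data_type`, which keeps the node's `shape` and `dtype` current (exactly what the added test asserts). A minimal sketch of the difference, using hypothetical stand-in classes rather than the real `Variable`/`VariableNode`:

```python
import numpy as np

class _Node:
    """Hypothetical stand-in for VariableNode's dtype/shape bookkeeping."""
    dtype = None
    shape = None

    def _set_data_type(self, d):
        self.dtype = None if d is None else d.dtype
        self.shape = None if d is None else d.shape

class _Var:
    """Hypothetical stand-in for the Variable data-property pattern."""
    def __init__(self):
        self._data = [None]   # one-element list abstracts the (un)initialized state
        self._node = _Node()

    @property
    def data(self):
        return self._data[0]

    @data.setter
    def data(self, d):
        self._data[0] = d              # store the array
        self._node._set_data_type(d)   # keep node metadata in sync

arr = np.zeros((2, 3), dtype=np.float64)

v = _Var()
v._data[0] = arr                  # old behaviour: bypasses the setter
assert v._node.shape is None      # node metadata stays stale

w = _Var()
w.data = arr                      # patched behaviour: goes through the setter
assert w._node.shape == (2, 3) and w._node.dtype == np.float64
```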
certbot__certbot-9561
docs: generated man pages stick everything under NAME ## My operating system is (include version): FreeBSD ## I installed Certbot with (snap, OS package manager, pip, certbot-auto, etc): I didn't, really; I was just looking for a man page on https://man.freebsd.org when I noticed this ## I ran this command and it produced this output: apropos certbot https://man.freebsd.org/cgi/man.cgi?query=certbot&apropos=1&sektion=0&manpath=FreeBSD+13.1-RELEASE+and+Ports&arch=default&format=html ## Certbot's behavior differed from what I expected because: this should only show the first line, but it has the entire man page. That means that the source must be formatted wrong.
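The report above comes down to the NAME section of the generated page: `apropos`/`whatis` index only the one-line `name - description` entry under NAME, so if the whole document ends up there the tools print everything. In Sphinx, that description comes from the `man_pages` tuples in `docs/conf.py` (reproduced in full below). The sketch restates those two entries with that in mind; the `project` assignment mirrors the value set earlier in the same conf.py, and the diagnosis that the generated NAME section contains more than this single line is the reporter's observation, not something verified here.

```python
# Sketch of the man_pages configuration from certbot/docs/conf.py.
# Each tuple is (source start file, name, description, authors, manual section);
# the description string is what should end up as the one-line summary under
# NAME, which is all that apropos/whatis are meant to display.
project = u'Certbot'  # matches the value set earlier in conf.py

man_pages = [
    ('index', 'certbot', u'Certbot Documentation', [project], 7),
    ('man/certbot', 'certbot', u'certbot script documentation', [project], 1),
]
```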
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Certbot documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 23 20:35:21 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport codecs\nimport os\nimport re\nimport sys\n\nimport sphinx\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, '..', 'certbot', '__init__.py')\nwith codecs.open(init_fn, encoding='utf8') as fd:\n meta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", fd.read()))\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join(here, '..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.2'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n]\n\nif sphinx.version_info >= (1, 6):\n extensions.append('sphinx.ext.imgconverter')\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = ['show-inheritance']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Certbot'\n# this is now overridden by the footer.html template\n#copyright = u'2014-2018 - The Certbot software and documentation are licensed under the Apache 2.0 license as described at https://eff.org/cb-license.'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '.'.join(meta['version'].split('.')[:2])\n# The full version, including alpha/beta/rc tags.\nrelease = meta['version']\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n '_build',\n 'challenges.rst',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'py:obj'\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\nsuppress_warnings = ['image.nonlocal_uri']\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# https://docs.readthedocs.io/en/stable/faq.html#i-want-to-use-the-read-the-docs-theme-locally\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Certbotdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n\n # Latex figure (float) alignment\n #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Certbot.tex', u'Certbot Documentation',\n u'Certbot Project', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'certbot', u'Certbot Documentation',\n [project], 7),\n ('man/certbot', 'certbot', u'certbot script documentation',\n [project], 1),\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Certbot', u'Certbot Documentation',\n u'Certbot Project', 'Certbot', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'acme': ('https://acme-python.readthedocs.org/en/latest/', None),\n}\n", "path": "certbot/docs/conf.py" } ]
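One detail of the conf.py above worth a closer look is the metadata regex at the top: it scans `certbot/__init__.py` for `__something__ = '...'` lines and derives both the short `version` and the full `release` from `__version__`. Below is a standalone check of that idiom, using a hypothetical sample string rather than Certbot's real `__init__.py`.

```python
import re

# Hypothetical stand-in for the contents of certbot/__init__.py.
sample_init = """
__version__ = '2.3.0.dev0'
__author__ = 'Certbot Project'
"""

# Same pattern as in conf.py: collect the dunder metadata into a dict.
meta = dict(re.findall(r"""__([a-z]+)__ = '([^']+)""", sample_init))

release = meta['version']                            # full version string
version = '.'.join(meta['version'].split('.')[:2])   # short X.Y version
assert (version, release) == ('2.3', '2.3.0.dev0')
```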
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Certbot documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 23 20:35:21 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport codecs\nimport os\nimport re\nimport sys\n\nimport sphinx\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, '..', 'certbot', '__init__.py')\nwith codecs.open(init_fn, encoding='utf8') as fd:\n meta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", fd.read()))\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join(here, '..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.2'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n]\n\nif sphinx.version_info >= (1, 6):\n extensions.append('sphinx.ext.imgconverter')\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = ['show-inheritance']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Certbot'\n# this is now overridden by the footer.html template\n#copyright = u'2014-2018 - The Certbot software and documentation are licensed under the Apache 2.0 license as described at https://eff.org/cb-license.'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '.'.join(meta['version'].split('.')[:2])\n# The full version, including alpha/beta/rc tags.\nrelease = meta['version']\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n '_build',\n 'challenges.rst',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'py:obj'\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\nsuppress_warnings = ['image.nonlocal_uri']\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# https://docs.readthedocs.io/en/stable/faq.html#i-want-to-use-the-read-the-docs-theme-locally\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Certbotdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n\n # Latex figure (float) alignment\n #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Certbot.tex', u'Certbot Documentation',\n u'Certbot Project', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'certbot', u'Certbot Documentation',\n [project], 7),\n ('man/certbot', 'certbot', u\"Automatically configure HTTPS using Let's Encrypt\",\n [project], 1),\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Certbot', u'Certbot Documentation',\n u'Certbot Project', 'Certbot', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'acme': ('https://acme-python.readthedocs.org/en/latest/', None),\n}\n", "path": "certbot/docs/conf.py" } ]
diff --git a/certbot/docs/conf.py b/certbot/docs/conf.py index de9e287fc03..5102e7a1e39 100644 --- a/certbot/docs/conf.py +++ b/certbot/docs/conf.py @@ -286,7 +286,7 @@ man_pages = [ ('index', 'certbot', u'Certbot Documentation', [project], 7), - ('man/certbot', 'certbot', u'certbot script documentation', + ('man/certbot', 'certbot', u"Automatically configure HTTPS using Let's Encrypt", [project], 1), ] diff --git a/certbot/docs/man/certbot.rst b/certbot/docs/man/certbot.rst index 2f25504b04e..cc690d2da1d 100644 --- a/certbot/docs/man/certbot.rst +++ b/certbot/docs/man/certbot.rst @@ -1,3 +1,23 @@ :orphan: -.. literalinclude:: ../cli-help.txt +======= +certbot +======= + +Synopsis +======== +The objective of Certbot, Let's Encrypt, and the ACME (Automated Certificate Management +Environment) protocol is to make it possible to set up an HTTPS server and have it automatically +obtain a browser-trusted certificate, without any human intervention. This is accomplished by +running a certificate management agent on the web server. + +This agent is used to: + +- Automatically prove to the Let's Encrypt CA that you control the website +- Obtain a browser-trusted certificate and set it up on your web server +- Keep track of when your certificate is going to expire, and renew it +- Help you revoke the certificate if that ever becomes necessary. + +Options +======= +.. literalinclude:: ../cli-help.txt \ No newline at end of file
django-cms__django-filer-486
[Django>=1.7] FilerFolderField admin widget hidden If you use Django 1.7 and have a FilerFolderField in a model, the corresponding admin widget will not show up. This has been the case since django/django@dc3d2ac98c1bcfad74d3e9523caf07e7e9fb15aa. In `fieldset.html`, a `hidden` class is added to a form row if all the contained widgets have `is_hidden = True`. Setting `is_hidden` to `False` works fine. I’m not sure whether this attribute is useful somewhere else; I found it used 7 times in Django, and all of those uses refer to showing the field and its label. Since it’s not really a hidden field, I suggest we make that change.
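To make the failure mode concrete, here is a toy model of the row-hiding rule described above. This is an illustration only, not Django's actual `fieldset.html` logic and not the patch applied in the diff below; the widget class names are made up:

```python
# Toy model of the rule described in the issue: the admin collapses a form
# row only when *every* widget in it claims to be hidden. A raw-id style
# lookup widget stores its value in an <input type="hidden"> but also renders
# visible markup, so it should keep the default is_hidden = False.
def row_css_classes(widgets):
    classes = ["form-row"]
    if all(getattr(w, "is_hidden", False) for w in widgets):
        classes.append("hidden")
    return " ".join(classes)


class LookupWidget:
    """Hypothetical widget: hidden <input> plus a visible folder-lookup link."""
    is_hidden = False


class TrulyHiddenWidget:
    """Hypothetical widget that produces no visible output at all."""
    is_hidden = True


print(row_css_classes([LookupWidget()]))       # -> "form-row"
print(row_css_classes([TrulyHiddenWidget()]))  # -> "form-row hidden"
```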
[ { "content": "#-*- coding: utf-8 -*-\nfrom django.template.loader import render_to_string\nimport inspect\nimport warnings\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.contrib.admin.sites import site\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom filer.utils.compatibility import truncate_words\nfrom django.utils.translation import ugettext as _\nfrom filer.models import Folder\nfrom filer.settings import FILER_STATICMEDIA_PREFIX\n\n\nclass AdminFolderWidget(ForeignKeyRawIdWidget):\n choices = None\n input_type = 'hidden'\n is_hidden = True\n\n def render(self, name, value, attrs=None):\n obj = self.obj_for_value(value)\n css_id = attrs.get('id')\n css_id_folder = \"%s_folder\" % css_id\n css_id_description_txt = \"%s_description_txt\" % css_id\n required = self.attrs\n if attrs is None:\n attrs = {}\n related_url = None\n if value:\n try:\n folder = Folder.objects.get(pk=value)\n related_url = folder.get_admin_directory_listing_url_path()\n except Exception:\n pass\n if not related_url:\n related_url = reverse('admin:filer-directory_listing-last')\n params = self.url_parameters()\n params['select_folder'] = 1\n if params:\n url = '?' + '&amp;'.join(\n ['%s=%s' % (k, v) for k, v in list(params.items())])\n else:\n url = ''\n if not 'class' in attrs:\n # The JavaScript looks for this hook.\n attrs['class'] = 'vForeignKeyRawIdAdminField'\n super_attrs = attrs.copy()\n hidden_input = super(ForeignKeyRawIdWidget, self).render(\n name, value, super_attrs)\n\n # TODO: \"id_\" is hard-coded here. This should instead use the correct\n # API to determine the ID dynamically.\n context = {\n 'hidden_input': hidden_input,\n 'lookup_url': '%s%s' % (related_url, url),\n 'lookup_name': name,\n 'span_id': css_id_description_txt,\n 'object': obj,\n 'clear_id': '%s_clear' % css_id,\n 'descid': css_id_description_txt,\n 'noimg': '%sicons/nofile_32x32.png' % FILER_STATICMEDIA_PREFIX,\n 'foldid': css_id_folder,\n 'id': css_id,\n }\n html = render_to_string('admin/filer/widgets/admin_folder.html', context)\n return mark_safe(html)\n\n def label_for_value(self, value):\n obj = self.obj_for_value(value)\n return '&nbsp;<strong>%s</strong>' % truncate_words(obj, 14)\n\n def obj_for_value(self, value):\n try:\n key = self.rel.get_related_field().name\n obj = self.rel.to._default_manager.get(**{key: value})\n except:\n obj = None\n return obj\n\n class Media:\n js = (FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)\n\n\nclass AdminFolderFormField(forms.ModelChoiceField):\n widget = AdminFolderWidget\n\n def __init__(self, rel, queryset, to_field_name, *args, **kwargs):\n self.rel = rel\n self.queryset = queryset\n self.limit_choices_to = kwargs.pop('limit_choices_to', None)\n self.to_field_name = to_field_name\n self.max_value = None\n self.min_value = None\n kwargs.pop('widget', None)\n forms.Field.__init__(self, widget=self.widget(rel, site), *args, **kwargs)\n\n def widget_attrs(self, widget):\n widget.required = self.required\n return {}\n\n\nclass FilerFolderField(models.ForeignKey):\n default_form_class = AdminFolderFormField\n default_model_class = Folder\n\n def __init__(self, **kwargs):\n # We hard-code the `to` argument for ForeignKey.__init__\n if \"to\" in kwargs.keys():\n old_to = kwargs.pop(\"to\")\n msg = \"%s can only be a ForeignKey to %s; %s passed\" % (\n self.__class__.__name__, self.default_model_class.__name__, old_to\n 
)\n warnings.warn(msg, SyntaxWarning)\n kwargs['to'] = self.default_model_class\n return super(FilerFolderField, self).__init__(**kwargs)\n\n def formfield(self, **kwargs):\n # This is a fairly standard way to set up some defaults\n # while letting the caller override them.\n defaults = {\n 'form_class': self.default_form_class,\n 'rel': self.rel,\n }\n defaults.update(kwargs)\n return super(FilerFolderField, self).formfield(**defaults)\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.related.ForeignKey\"\n args, kwargs = introspector(self)\n # That's our definition!\n return (field_class, args, kwargs)\n", "path": "filer/fields/folder.py" } ]
[ { "content": "#-*- coding: utf-8 -*-\nfrom django.template.loader import render_to_string\nimport inspect\nimport warnings\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.contrib.admin.sites import site\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom filer.utils.compatibility import truncate_words\nfrom django.utils.translation import ugettext as _\nfrom filer.models import Folder\nfrom filer.settings import FILER_STATICMEDIA_PREFIX\n\n\nclass AdminFolderWidget(ForeignKeyRawIdWidget):\n choices = None\n input_type = 'hidden'\n\n def render(self, name, value, attrs=None):\n obj = self.obj_for_value(value)\n css_id = attrs.get('id')\n css_id_folder = \"%s_folder\" % css_id\n css_id_description_txt = \"%s_description_txt\" % css_id\n required = self.attrs\n if attrs is None:\n attrs = {}\n related_url = None\n if value:\n try:\n folder = Folder.objects.get(pk=value)\n related_url = folder.get_admin_directory_listing_url_path()\n except Exception:\n pass\n if not related_url:\n related_url = reverse('admin:filer-directory_listing-last')\n params = self.url_parameters()\n params['select_folder'] = 1\n if params:\n url = '?' + '&amp;'.join(\n ['%s=%s' % (k, v) for k, v in list(params.items())])\n else:\n url = ''\n if not 'class' in attrs:\n # The JavaScript looks for this hook.\n attrs['class'] = 'vForeignKeyRawIdAdminField'\n super_attrs = attrs.copy()\n hidden_input = super(ForeignKeyRawIdWidget, self).render(\n name, value, super_attrs)\n\n # TODO: \"id_\" is hard-coded here. This should instead use the correct\n # API to determine the ID dynamically.\n context = {\n 'hidden_input': hidden_input,\n 'lookup_url': '%s%s' % (related_url, url),\n 'lookup_name': name,\n 'span_id': css_id_description_txt,\n 'object': obj,\n 'clear_id': '%s_clear' % css_id,\n 'descid': css_id_description_txt,\n 'noimg': '%sicons/nofile_32x32.png' % FILER_STATICMEDIA_PREFIX,\n 'foldid': css_id_folder,\n 'id': css_id,\n }\n html = render_to_string('admin/filer/widgets/admin_folder.html', context)\n return mark_safe(html)\n\n def label_for_value(self, value):\n obj = self.obj_for_value(value)\n return '&nbsp;<strong>%s</strong>' % truncate_words(obj, 14)\n\n def obj_for_value(self, value):\n try:\n key = self.rel.get_related_field().name\n obj = self.rel.to._default_manager.get(**{key: value})\n except:\n obj = None\n return obj\n\n class Media:\n js = (FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)\n\n\nclass AdminFolderFormField(forms.ModelChoiceField):\n widget = AdminFolderWidget\n\n def __init__(self, rel, queryset, to_field_name, *args, **kwargs):\n self.rel = rel\n self.queryset = queryset\n self.limit_choices_to = kwargs.pop('limit_choices_to', None)\n self.to_field_name = to_field_name\n self.max_value = None\n self.min_value = None\n kwargs.pop('widget', None)\n forms.Field.__init__(self, widget=self.widget(rel, site), *args, **kwargs)\n\n def widget_attrs(self, widget):\n widget.required = self.required\n return {}\n\n\nclass FilerFolderField(models.ForeignKey):\n default_form_class = AdminFolderFormField\n default_model_class = Folder\n\n def __init__(self, **kwargs):\n # We hard-code the `to` argument for ForeignKey.__init__\n if \"to\" in kwargs.keys():\n old_to = kwargs.pop(\"to\")\n msg = \"%s can only be a ForeignKey to %s; %s passed\" % (\n self.__class__.__name__, self.default_model_class.__name__, old_to\n )\n 
warnings.warn(msg, SyntaxWarning)\n kwargs['to'] = self.default_model_class\n return super(FilerFolderField, self).__init__(**kwargs)\n\n def formfield(self, **kwargs):\n # This is a fairly standard way to set up some defaults\n # while letting the caller override them.\n defaults = {\n 'form_class': self.default_form_class,\n 'rel': self.rel,\n }\n defaults.update(kwargs)\n return super(FilerFolderField, self).formfield(**defaults)\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.related.ForeignKey\"\n args, kwargs = introspector(self)\n # That's our definition!\n return (field_class, args, kwargs)\n", "path": "filer/fields/folder.py" } ]
diff --git a/filer/fields/folder.py b/filer/fields/folder.py index fdd3c7f0f..ac5b5050d 100644 --- a/filer/fields/folder.py +++ b/filer/fields/folder.py @@ -18,7 +18,6 @@ class AdminFolderWidget(ForeignKeyRawIdWidget): choices = None input_type = 'hidden' - is_hidden = True def render(self, name, value, attrs=None): obj = self.obj_for_value(value)
interlegis__sapl-1513
Problem when deleting legacy data from ExpedienteSessaoPlenaria The txt_expediente field may contain values with HTML tags, which breaks the formatting of the SQL delete string because of the double quotes.
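As an illustration of the quoting problem, here is a short sketch. It is not the repository's actual fix (which, as the updated `delete_old` below shows, simply drops `txt_expediente` from the columns used to match the row); the table name and sample value are hypothetical:

```python
# Why str.format-built SQL breaks on txt_expediente: HTML attributes use
# double quotes, so the interpolated value terminates the SQL literal early.
value = '<p align="center">Leitura do expediente</p>'  # hypothetical content

eq_clause = 'txt_expediente="{}"'.format(value)
print(eq_clause)
# txt_expediente="<p align="center">Leitura do expediente</p>"
# -> the inner double quotes close the SQL string too soon

# A parameterized query avoids manual quoting entirely (table/column names
# are illustrative; cursor would come from e.g. connections['legacy'].cursor()):
sql = "delete from expediente_sessao_plenaria where txt_expediente = %s"
params = [value]
# cursor.execute(sql, params)
```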
[ { "content": "import re\nfrom datetime import date\nfrom functools import lru_cache\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connections, transaction\nfrom django.db.models import Count, Max\nfrom django.db.models.base import ModelBase\n\nfrom sapl.base.models import AppConfig as AppConf\nfrom sapl.base.models import (Autor, CasaLegislativa, ProblemaMigracao,\n TipoAutor)\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import TipoNumeracaoProtocolo\nfrom sapl.materia.models import (AcompanhamentoMateria, Proposicao,\n StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import (Legislatura, Mandato, Parlamentar,\n TipoAfastamento)\nfrom sapl.protocoloadm.models import (DocumentoAdministrativo, Protocolo,\n StatusTramitacaoAdministrativo)\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, RegistroVotacao\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! 
' + msg)\n\n\nclass ForeignKeyFaltando(ObjectDoesNotExist):\n 'Uma FK aponta para um registro inexistente'\n pass\n\n\n@lru_cache()\ndef _get_all_ids_from_model(model):\n # esta função para uso apenas em get_fk_related\n return set(model.objects.values_list('id', flat=True))\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null:\n return None\n\n # if field.related_model.objects.filter(id=value).exists():\n if value in _get_all_ids_from_model(field.related_model):\n return value\n else:\n msg = 'FK [%s] não encontrada para o valor %s (em %s %s)' % (\n field.name, value, field.model.__name__, label or '---')\n warn(msg)\n raise ForeignKeyFaltando(msg)\n\n\ndef exec_sql_file(path, db='default'):\n with open(path) as arq:\n sql = arq.read()\n with connections[db].cursor() as cursor:\n cursor.execute(sql)\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] or 0\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False, critico=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub, critico=critico)\n link.save()\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera o(a)',\n 'Alterado(a) pelo(a)'),\n ('R', 'Revoga integralmente o(a)',\n 'Revogado(a) integralmente pelo(a)'),\n ('P', 'Revoga parcialmente o(a)',\n 'Revogado(a) parcialmente pelo(a)'),\n ('T', 'Revoga integralmente por consolidação',\n 'Revogado(a) integralmente por consolidação'),\n ('C', 'Norma correlata',\n 'Norma correlata'),\n ('S', 'Ressalva o(a)',\n 'Ressalvada pelo(a)'),\n ('E', 'Reedita o(a)',\n 'Reeditada pelo(a)'),\n ('I', 'Reedita com alteração o(a)',\n 'Reeditada com alteração pelo(a)'),\n ('G', 'Regulamenta o(a)',\n 'Regulamentada pelo(a)'),\n ('K', 'Suspende parcialmente o(a)',\n 'Suspenso(a) parcialmente pelo(a)'),\n ('L', 'Suspende integralmente o(a)',\n 'Suspenso(a) integralmente pelo(a)'),\n ('N', 'Julga integralmente inconstitucional',\n 'Julgada integralmente inconstitucional'),\n ('O', 'Julga parcialmente inconstitucional',\n 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(\n sigla=item[0], descricao_ativa=item[1], descricao_passiva=item[2])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\ndef fill_dados_basicos():\n # Ajusta sequencia numérica e cria base.AppConfig\n letra = 'A'\n try:\n tipo = TipoNumeracaoProtocolo.objects.latest('dat_inicial_protocolo')\n if 'POR ANO' in tipo.des_numeracao_protocolo:\n letra = 'A'\n elif 'POR LEGISLATURA' in tipo.des_numeracao_protocolo:\n letra = 'L'\n elif 'CONSECUTIVO' in tipo.des_numeracao_protocolo:\n letra = 'U'\n except Exception as e:\n pass\n appconf = AppConf(sequencia_numeracao=letra)\n appconf.save()\n\n # Cria instância de CasaLegislativa\n casa = CasaLegislativa()\n casa.save()\n\n\n# Uma anomalia no sapl 2.5 causa a duplicação de registros de votação.\n# Essa duplicação deve ser eliminada para que não haja erro no sapl 3.1\ndef excluir_registrovotacao_duplicados():\n duplicatas_ids = 
RegistroVotacao.objects.values(\n 'materia', 'ordem', 'expediente').annotate(\n Count('id')).order_by().filter(id__count__gt=1)\n duplicatas_queryset = RegistroVotacao.objects.filter(\n materia__in=[item['materia'] for item in duplicatas_ids])\n\n for dup in duplicatas_queryset:\n lista_dups = duplicatas_queryset.filter(\n materia=dup.materia, expediente=dup.expediente, ordem=dup.ordem)\n primeiro_registro = lista_dups[0]\n lista_dups = lista_dups.exclude(pk=primeiro_registro.pk)\n for objeto in lista_dups:\n if (objeto.pk > primeiro_registro.pk):\n try:\n objeto.delete()\n except:\n assert 0\n else:\n try:\n primeiro_registro.delete()\n primeiro_registro = objeto\n except:\n assert 0\n\n\ndef delete_old(legacy_model, cols_values):\n\n def eq_clause(col, value):\n if value is None:\n return '{} IS NULL'.format(col)\n else:\n return '{}=\"{}\"'.format(col, value)\n\n delete_sql = 'delete from {} where {}'.format(\n legacy_model._meta.db_table,\n ' and '.join([eq_clause(col, value)\n for col, value in cols_values.items()]))\n exec_sql(delete_sql, 'legacy')\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n if old_field_name:\n old_value = getattr(old, old_field_name)\n\n if field_type == 'ForeignKey':\n # not necessarily a model\n if hasattr(old, '_meta') and old._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n fk_field_name = '{}_id'.format(field.name)\n value = get_fk_related(field, old_value, label)\n setattr(new, fk_field_name, value)\n else:\n value = getattr(old, old_field_name)\n # TODO rever esse DateField após as mudança para datas com\n # timezone\n if field_type == 'DateField' and \\\n not field.null and value is None:\n # TODO REVER ISSO\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(\"O valor do campo %s (%s) do model %s \"\n \"era inválido => %s\" % (\n field.name, field_type,\n field.model.__name__, descricao))\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if (field_type in ['CharField', 'TextField']\n and value in [None, 'None']):\n value = ''\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco serão excluidos. '\n 'Recomendamos que faça backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? [s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migração cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--database=default', '--no-input'], stdout=PIPE)\n\n fill_vinculo_norma_juridica()\n fill_dados_basicos()\n info('Começando migração: %s...' 
% obj)\n self._do_migrate(obj)\n\n info('Excluindo possíveis duplicações em RegistroVotacao...')\n excluir_registrovotacao_duplicados()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migração vai pular TipoProposicao e só vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n\n # apaga registro do legado\n delete_old(legacy_model, old.__dict__)\n\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n # salva new com id de old\n new.id = getattr(old, legacy_pk_name)\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n\n # apaga registro do legado\n delete_old(legacy_model, {legacy_pk_name: new.id})\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n with transaction.atomic():\n for old in old_records:\n if getattr(old, 'ind_excluido', False):\n # não migramos registros marcados como excluídos\n continue\n new = model()\n try:\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n except ForeignKeyFaltando:\n # tentamos preencher uma FK e o ojeto relacionado\n # não existe\n # então este é um objeo órfão: simplesmente ignoramos\n continue\n else:\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment(\n 'Ajuste de data pela migração')\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_acompanhamentomateria(new, old):\n new.confirmado = True\n\n\ndef adjust_documentoadministrativo(new, old):\n if new.numero_protocolo:\n protocolo = Protocolo.objects.filter(\n numero=new.numero_protocolo, ano=new.ano)\n if not protocolo:\n protocolo = Protocolo.objects.filter(\n numero=new.numero_protocolo, ano=new.ano + 1)\n print('PROTOCOLO ENCONTRADO APENAS PARA O ANO SEGUINTE!!!!! 
'\n 'DocumentoAdministrativo: {}, numero_protocolo: {}, '\n 'ano doc adm: {}'.format(\n old.cod_documento, new.numero_protocolo, new.ano))\n if not protocolo:\n raise ForeignKeyFaltando(\n 'Protocolo {} faltando '\n '(referenciado no documento administrativo {}'.format(\n new.numero_protocolo, old.cod_documento))\n assert len(protocolo) == 1\n new.protocolo = protocolo[0]\n\n\ndef adjust_mandato(new, old):\n if old.dat_fim_mandato:\n new.data_fim_mandato = old.dat_fim_mandato\n if not new.data_fim_mandato:\n legislatura = Legislatura.objects.latest('data_fim')\n new.data_fim_mandato = legislatura.data_fim\n new.data_expedicao_diploma = legislatura.data_inicio\n if not new.data_inicio_mandato:\n new.data_inicio_mandato = new.legislatura.data_inicio\n new.data_fim_mandato = new.legislatura.data_fim\n\n\ndef adjust_ordemdia_antes_salvar(new, old):\n new.votacao_aberta = False\n\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n if old.num_ordem is None:\n new.numero_ordem = 999999999\n\n\ndef adjust_ordemdia_depois_salvar(new, old):\n if old.num_ordem is None and new.numero_ordem == 999999999:\n with reversion.create_revision():\n problema = 'OrdemDia de PK %s tinha seu valor de numero ordem'\\\n ' nulo.' % old.pk\n descricao = 'O valor %s foi colocado no lugar.' % new.numero_ordem\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('OrdemDia sem número da ordem.')\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao_id, composicao.periodo_id = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migração')\n new.composicao = composicao\n\n\ndef adjust_proposicao_antes_salvar(new, old):\n if new.data_envio:\n new.ano = new.data_envio.year\n\n\ndef adjust_proposicao_depois_salvar(new, old):\n if not hasattr(old.dat_envio, 'year') or old.dat_envio.year == 1800:\n msg = \"O valor do campo data_envio (DateField) da model Proposicao\"\\\n \" era inválido\"\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(msg + ' => ' + descricao)\n new.data_envio = date(1111, 11, 11)\n with reversion.create_revision():\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Ajuste de data pela migração')\n\n\ndef adjust_normarelacionada(new, old):\n tipo = TipoVinculoNormaJuridica.objects.filter(sigla=old.tip_vinculo)\n assert len(tipo) == 1\n new.tipo_vinculo = tipo[0]\n\n\ndef adjust_protocolo_antes_salvar(new, old):\n if old.num_protocolo is None:\n new.numero = old.cod_protocolo\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with 
reversion.create_revision():\n problema = 'Número do protocolo de PK %s é nulo' % new.pk\n descricao = 'Número do protocolo alterado para %s!' % new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Número de protocolo teve que ser alterado')\n\n\ndef adjust_registrovotacao_antes_salvar(new, old):\n ordem_dia = OrdemDia.objects.filter(\n pk=old.cod_ordem, materia=old.cod_materia)\n expediente_materia = ExpedienteMateria.objects.filter(\n pk=old.cod_ordem, materia=old.cod_materia)\n\n if ordem_dia and not expediente_materia:\n new.ordem = ordem_dia[0]\n if not ordem_dia and expediente_materia:\n new.expediente = expediente_materia[0]\n\n\ndef adjust_registrovotacao_depois_salvar(new, old):\n if not new.ordem and not new.expediente:\n with reversion.create_revision():\n problema = 'RegistroVotacao de PK %s não possui nenhuma OrdemDia'\\\n ' ou ExpedienteMateria.' % old.pk\n descricao = 'RevistroVotacao deve ter no mínimo uma ordem do dia'\\\n ' ou expediente vinculado.'\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('RegistroVotacao sem ordem ou expediente')\n\n\ndef adjust_tipoafastamento(new, old):\n if old.ind_afastamento == 1:\n new.indicador = 'A'\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n tipo_materia = TipoMateriaLegislativa.objects.filter(\n pk=old.tip_mat_ou_doc)\n if tipo_materia:\n new.tipo_conteudo_related = tipo_materia[0]\n else:\n raise ForeignKeyFaltando\n elif old.ind_mat_ou_doc == 'D':\n tipo_documento = TipoDocumento.objects.filter(pk=old.tip_mat_ou_doc)\n if tipo_documento:\n new.tipo_conteudo_related = tipo_documento[0]\n else:\n raise ForeignKeyFaltando\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == 'Ú':\n new.turno = 'U'\n\n\ndef adjust_tipo_autor(new, old):\n model_apontado = normalize(new.descricao.lower()).replace(' ', '')\n content_types = ContentType.objects.filter(\n model=model_apontado).exclude(app_label='legacy')\n assert len(content_types) <= 1\n new.content_type = content_types[0] if content_types else None\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. 
Na versão antiga do SAPL, quando uma opção do\n # combobox era selecionada, o sistema pegava a primeira letra da seleção,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira opção quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta relação M2M\n\n if not old.cod_assunto: # it can be null or empty\n return\n\n # lista de pks separadas por vírgulas (ignorando strings vazias)\n lista_pks_assunto = [int(pk) for pk in old.cod_assunto.split(',') if pk]\n\n for pk_assunto in lista_pks_assunto:\n try:\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n except ObjectDoesNotExist:\n pass # ignora assuntos inexistentes\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n try:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n except ObjectDoesNotExist:\n # ignoramos o autor órfão\n raise ForeignKeyFaltando('Parlamentar inexiste para autor')\n else:\n new.nome = new.autor_related.nome_parlamentar\n\n elif old.cod_comissao:\n try:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n except ObjectDoesNotExist:\n # ignoramos o autor órfão\n raise ForeignKeyFaltando('Comissao inexiste para autor')\n else:\n new.nome = new.autor_related.nome\n\n if old.col_username:\n user_model = get_user_model()\n if not user_model.objects.filter(username=old.col_username).exists():\n # cria um novo ususaŕio para o autor\n user = user_model(username=old.col_username)\n user.set_password(12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment(\n 'Usuário criado pela migração para o autor {}'.format(\n old.cod_autor))\n grupo_autor = Group.objects.get(name=\"Autor\")\n user.groups.add(grupo_autor)\n\n\ndef adjust_comissao(new, old):\n if not old.dat_extincao and not old.dat_fim_comissao:\n new.ativa = True\n elif old.dat_extincao and date.today() < new.data_extincao or \\\n old.dat_fim_comissao and date.today() < new.data_fim_comissao:\n new.ativa = True\n else:\n new.ativa = False\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n TipoAutor: adjust_tipo_autor,\n AcompanhamentoMateria: adjust_acompanhamentomateria,\n Comissao: adjust_comissao,\n DocumentoAdministrativo: adjust_documentoadministrativo,\n Mandato: adjust_mandato,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n NormaRelacionada: adjust_normarelacionada,\n OrdemDia: adjust_ordemdia_antes_salvar,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Proposicao: adjust_proposicao_antes_salvar,\n Protocolo: adjust_protocolo_antes_salvar,\n RegistroVotacao: adjust_registrovotacao_antes_salvar,\n TipoAfastamento: adjust_tipoafastamento,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n OrdemDia: adjust_ordemdia_depois_salvar,\n Proposicao: adjust_proposicao_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n RegistroVotacao: adjust_registrovotacao_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(new):\n legacy_model = legacy_app.get_model(type(new).__name__)\n old = legacy_model.objects.get(**{legacy_model._meta.pk.name: new.id})\n return getattr(old, 'ind_excluido', False)\n\n\ndef 
check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(new) for new in model.objects.all())\n print('OK!')\n", "path": "sapl/legacy/migration.py" } ]
[ { "content": "import re\nfrom datetime import date\nfrom functools import lru_cache\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connections, transaction\nfrom django.db.models import Count, Max\nfrom django.db.models.base import ModelBase\n\nfrom sapl.base.models import AppConfig as AppConf\nfrom sapl.base.models import (Autor, CasaLegislativa, ProblemaMigracao,\n TipoAutor)\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import TipoNumeracaoProtocolo\nfrom sapl.materia.models import (AcompanhamentoMateria, Proposicao,\n StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica, NormaRelacionada,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import (Legislatura, Mandato, Parlamentar,\n TipoAfastamento)\nfrom sapl.protocoloadm.models import (DocumentoAdministrativo, Protocolo,\n StatusTramitacaoAdministrativo)\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, RegistroVotacao\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! 
' + msg)\n\n\nclass ForeignKeyFaltando(ObjectDoesNotExist):\n 'Uma FK aponta para um registro inexistente'\n pass\n\n\n@lru_cache()\ndef _get_all_ids_from_model(model):\n # esta função para uso apenas em get_fk_related\n return set(model.objects.values_list('id', flat=True))\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null:\n return None\n\n # if field.related_model.objects.filter(id=value).exists():\n if value in _get_all_ids_from_model(field.related_model):\n return value\n else:\n msg = 'FK [%s] não encontrada para o valor %s (em %s %s)' % (\n field.name, value, field.model.__name__, label or '---')\n warn(msg)\n raise ForeignKeyFaltando(msg)\n\n\ndef exec_sql_file(path, db='default'):\n with open(path) as arq:\n sql = arq.read()\n with connections[db].cursor() as cursor:\n cursor.execute(sql)\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] or 0\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False, critico=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub, critico=critico)\n link.save()\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera o(a)',\n 'Alterado(a) pelo(a)'),\n ('R', 'Revoga integralmente o(a)',\n 'Revogado(a) integralmente pelo(a)'),\n ('P', 'Revoga parcialmente o(a)',\n 'Revogado(a) parcialmente pelo(a)'),\n ('T', 'Revoga integralmente por consolidação',\n 'Revogado(a) integralmente por consolidação'),\n ('C', 'Norma correlata',\n 'Norma correlata'),\n ('S', 'Ressalva o(a)',\n 'Ressalvada pelo(a)'),\n ('E', 'Reedita o(a)',\n 'Reeditada pelo(a)'),\n ('I', 'Reedita com alteração o(a)',\n 'Reeditada com alteração pelo(a)'),\n ('G', 'Regulamenta o(a)',\n 'Regulamentada pelo(a)'),\n ('K', 'Suspende parcialmente o(a)',\n 'Suspenso(a) parcialmente pelo(a)'),\n ('L', 'Suspende integralmente o(a)',\n 'Suspenso(a) integralmente pelo(a)'),\n ('N', 'Julga integralmente inconstitucional',\n 'Julgada integralmente inconstitucional'),\n ('O', 'Julga parcialmente inconstitucional',\n 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(\n sigla=item[0], descricao_ativa=item[1], descricao_passiva=item[2])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\ndef fill_dados_basicos():\n # Ajusta sequencia numérica e cria base.AppConfig\n letra = 'A'\n try:\n tipo = TipoNumeracaoProtocolo.objects.latest('dat_inicial_protocolo')\n if 'POR ANO' in tipo.des_numeracao_protocolo:\n letra = 'A'\n elif 'POR LEGISLATURA' in tipo.des_numeracao_protocolo:\n letra = 'L'\n elif 'CONSECUTIVO' in tipo.des_numeracao_protocolo:\n letra = 'U'\n except Exception as e:\n pass\n appconf = AppConf(sequencia_numeracao=letra)\n appconf.save()\n\n # Cria instância de CasaLegislativa\n casa = CasaLegislativa()\n casa.save()\n\n\n# Uma anomalia no sapl 2.5 causa a duplicação de registros de votação.\n# Essa duplicação deve ser eliminada para que não haja erro no sapl 3.1\ndef excluir_registrovotacao_duplicados():\n duplicatas_ids = 
RegistroVotacao.objects.values(\n 'materia', 'ordem', 'expediente').annotate(\n Count('id')).order_by().filter(id__count__gt=1)\n duplicatas_queryset = RegistroVotacao.objects.filter(\n materia__in=[item['materia'] for item in duplicatas_ids])\n\n for dup in duplicatas_queryset:\n lista_dups = duplicatas_queryset.filter(\n materia=dup.materia, expediente=dup.expediente, ordem=dup.ordem)\n primeiro_registro = lista_dups[0]\n lista_dups = lista_dups.exclude(pk=primeiro_registro.pk)\n for objeto in lista_dups:\n if (objeto.pk > primeiro_registro.pk):\n try:\n objeto.delete()\n except:\n assert 0\n else:\n try:\n primeiro_registro.delete()\n primeiro_registro = objeto\n except:\n assert 0\n\n\ndef delete_old(legacy_model, cols_values):\n # ajuste necessário por conta de cósigos html em txt_expediente\n if legacy_model.__name__ == 'ExpedienteSessaoPlenaria':\n cols_values.pop('txt_expediente')\n\n def eq_clause(col, value):\n if value is None:\n return '{} IS NULL'.format(col)\n else:\n return '{}=\"{}\"'.format(col, value)\n\n delete_sql = 'delete from {} where {}'.format(\n legacy_model._meta.db_table,\n ' and '.join([eq_clause(col, value)\n for col, value in cols_values.items()]))\n exec_sql(delete_sql, 'legacy')\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n if old_field_name:\n old_value = getattr(old, old_field_name)\n\n if field_type == 'ForeignKey':\n # not necessarily a model\n if hasattr(old, '_meta') and old._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n fk_field_name = '{}_id'.format(field.name)\n value = get_fk_related(field, old_value, label)\n setattr(new, fk_field_name, value)\n else:\n value = getattr(old, old_field_name)\n # TODO rever esse DateField após as mudança para datas com\n # timezone\n if field_type == 'DateField' and \\\n not field.null and value is None:\n # TODO REVER ISSO\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(\"O valor do campo %s (%s) do model %s \"\n \"era inválido => %s\" % (\n field.name, field_type,\n field.model.__name__, descricao))\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if (field_type in ['CharField', 'TextField']\n and value in [None, 'None']):\n value = ''\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco serão excluidos. '\n 'Recomendamos que faça backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? 
[s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migração cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--database=default', '--no-input'], stdout=PIPE)\n\n fill_vinculo_norma_juridica()\n fill_dados_basicos()\n info('Começando migração: %s...' % obj)\n self._do_migrate(obj)\n\n info('Excluindo possíveis duplicações em RegistroVotacao...')\n excluir_registrovotacao_duplicados()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migração vai pular TipoProposicao e só vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n\n # apaga registro do legado\n delete_old(legacy_model, old.__dict__)\n\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n # salva new com id de old\n new.id = getattr(old, legacy_pk_name)\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n\n # apaga registro do legado\n delete_old(legacy_model, {legacy_pk_name: new.id})\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n with transaction.atomic():\n for old in old_records:\n if getattr(old, 'ind_excluido', False):\n # não migramos registros marcados como excluídos\n continue\n new = model()\n try:\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n except ForeignKeyFaltando:\n # tentamos preencher uma FK e o ojeto relacionado\n # não existe\n # então este é um objeo órfão: simplesmente ignoramos\n continue\n else:\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment(\n 'Ajuste de data pela migração')\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_acompanhamentomateria(new, old):\n new.confirmado = True\n\n\ndef adjust_documentoadministrativo(new, old):\n if new.numero_protocolo:\n protocolo = Protocolo.objects.filter(\n numero=new.numero_protocolo, ano=new.ano)\n if 
not protocolo:\n protocolo = Protocolo.objects.filter(\n numero=new.numero_protocolo, ano=new.ano + 1)\n print('PROTOCOLO ENCONTRADO APENAS PARA O ANO SEGUINTE!!!!! '\n 'DocumentoAdministrativo: {}, numero_protocolo: {}, '\n 'ano doc adm: {}'.format(\n old.cod_documento, new.numero_protocolo, new.ano))\n if not protocolo:\n raise ForeignKeyFaltando(\n 'Protocolo {} faltando '\n '(referenciado no documento administrativo {}'.format(\n new.numero_protocolo, old.cod_documento))\n assert len(protocolo) == 1\n new.protocolo = protocolo[0]\n\n\ndef adjust_mandato(new, old):\n if old.dat_fim_mandato:\n new.data_fim_mandato = old.dat_fim_mandato\n if not new.data_fim_mandato:\n legislatura = Legislatura.objects.latest('data_fim')\n new.data_fim_mandato = legislatura.data_fim\n new.data_expedicao_diploma = legislatura.data_inicio\n if not new.data_inicio_mandato:\n new.data_inicio_mandato = new.legislatura.data_inicio\n new.data_fim_mandato = new.legislatura.data_fim\n\n\ndef adjust_ordemdia_antes_salvar(new, old):\n new.votacao_aberta = False\n\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n if old.num_ordem is None:\n new.numero_ordem = 999999999\n\n\ndef adjust_ordemdia_depois_salvar(new, old):\n if old.num_ordem is None and new.numero_ordem == 999999999:\n with reversion.create_revision():\n problema = 'OrdemDia de PK %s tinha seu valor de numero ordem'\\\n ' nulo.' % old.pk\n descricao = 'O valor %s foi colocado no lugar.' % new.numero_ordem\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('OrdemDia sem número da ordem.')\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao_id, composicao.periodo_id = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migração')\n new.composicao = composicao\n\n\ndef adjust_proposicao_antes_salvar(new, old):\n if new.data_envio:\n new.ano = new.data_envio.year\n\n\ndef adjust_proposicao_depois_salvar(new, old):\n if not hasattr(old.dat_envio, 'year') or old.dat_envio.year == 1800:\n msg = \"O valor do campo data_envio (DateField) da model Proposicao\"\\\n \" era inválido\"\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(msg + ' => ' + descricao)\n new.data_envio = date(1111, 11, 11)\n with reversion.create_revision():\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Ajuste de data pela migração')\n\n\ndef adjust_normarelacionada(new, old):\n tipo = TipoVinculoNormaJuridica.objects.filter(sigla=old.tip_vinculo)\n assert len(tipo) == 1\n new.tipo_vinculo = tipo[0]\n\n\ndef adjust_protocolo_antes_salvar(new, 
old):\n if old.num_protocolo is None:\n new.numero = old.cod_protocolo\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with reversion.create_revision():\n problema = 'Número do protocolo de PK %s é nulo' % new.pk\n descricao = 'Número do protocolo alterado para %s!' % new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Número de protocolo teve que ser alterado')\n\n\ndef adjust_registrovotacao_antes_salvar(new, old):\n ordem_dia = OrdemDia.objects.filter(\n pk=old.cod_ordem, materia=old.cod_materia)\n expediente_materia = ExpedienteMateria.objects.filter(\n pk=old.cod_ordem, materia=old.cod_materia)\n\n if ordem_dia and not expediente_materia:\n new.ordem = ordem_dia[0]\n if not ordem_dia and expediente_materia:\n new.expediente = expediente_materia[0]\n\n\ndef adjust_registrovotacao_depois_salvar(new, old):\n if not new.ordem and not new.expediente:\n with reversion.create_revision():\n problema = 'RegistroVotacao de PK %s não possui nenhuma OrdemDia'\\\n ' ou ExpedienteMateria.' % old.pk\n descricao = 'RevistroVotacao deve ter no mínimo uma ordem do dia'\\\n ' ou expediente vinculado.'\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('RegistroVotacao sem ordem ou expediente')\n\n\ndef adjust_tipoafastamento(new, old):\n if old.ind_afastamento == 1:\n new.indicador = 'A'\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n tipo_materia = TipoMateriaLegislativa.objects.filter(\n pk=old.tip_mat_ou_doc)\n if tipo_materia:\n new.tipo_conteudo_related = tipo_materia[0]\n else:\n raise ForeignKeyFaltando\n elif old.ind_mat_ou_doc == 'D':\n tipo_documento = TipoDocumento.objects.filter(pk=old.tip_mat_ou_doc)\n if tipo_documento:\n new.tipo_conteudo_related = tipo_documento[0]\n else:\n raise ForeignKeyFaltando\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == 'Ú':\n new.turno = 'U'\n\n\ndef adjust_tipo_autor(new, old):\n model_apontado = normalize(new.descricao.lower()).replace(' ', '')\n content_types = ContentType.objects.filter(\n model=model_apontado).exclude(app_label='legacy')\n assert len(content_types) <= 1\n new.content_type = content_types[0] if content_types else None\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. 
Na versão antiga do SAPL, quando uma opção do\n # combobox era selecionada, o sistema pegava a primeira letra da seleção,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira opção quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta relação M2M\n\n if not old.cod_assunto: # it can be null or empty\n return\n\n # lista de pks separadas por vírgulas (ignorando strings vazias)\n lista_pks_assunto = [int(pk) for pk in old.cod_assunto.split(',') if pk]\n\n for pk_assunto in lista_pks_assunto:\n try:\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n except ObjectDoesNotExist:\n pass # ignora assuntos inexistentes\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n try:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n except ObjectDoesNotExist:\n # ignoramos o autor órfão\n raise ForeignKeyFaltando('Parlamentar inexiste para autor')\n else:\n new.nome = new.autor_related.nome_parlamentar\n\n elif old.cod_comissao:\n try:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n except ObjectDoesNotExist:\n # ignoramos o autor órfão\n raise ForeignKeyFaltando('Comissao inexiste para autor')\n else:\n new.nome = new.autor_related.nome\n\n if old.col_username:\n user_model = get_user_model()\n if not user_model.objects.filter(username=old.col_username).exists():\n # cria um novo ususaŕio para o autor\n user = user_model(username=old.col_username)\n user.set_password(12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment(\n 'Usuário criado pela migração para o autor {}'.format(\n old.cod_autor))\n grupo_autor = Group.objects.get(name=\"Autor\")\n user.groups.add(grupo_autor)\n\n\ndef adjust_comissao(new, old):\n if not old.dat_extincao and not old.dat_fim_comissao:\n new.ativa = True\n elif old.dat_extincao and date.today() < new.data_extincao or \\\n old.dat_fim_comissao and date.today() < new.data_fim_comissao:\n new.ativa = True\n else:\n new.ativa = False\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n TipoAutor: adjust_tipo_autor,\n AcompanhamentoMateria: adjust_acompanhamentomateria,\n Comissao: adjust_comissao,\n DocumentoAdministrativo: adjust_documentoadministrativo,\n Mandato: adjust_mandato,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n NormaRelacionada: adjust_normarelacionada,\n OrdemDia: adjust_ordemdia_antes_salvar,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Proposicao: adjust_proposicao_antes_salvar,\n Protocolo: adjust_protocolo_antes_salvar,\n RegistroVotacao: adjust_registrovotacao_antes_salvar,\n TipoAfastamento: adjust_tipoafastamento,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n OrdemDia: adjust_ordemdia_depois_salvar,\n Proposicao: adjust_proposicao_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n RegistroVotacao: adjust_registrovotacao_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(new):\n legacy_model = legacy_app.get_model(type(new).__name__)\n old = legacy_model.objects.get(**{legacy_model._meta.pk.name: new.id})\n return getattr(old, 'ind_excluido', False)\n\n\ndef 
check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(new) for new in model.objects.all())\n print('OK!')\n", "path": "sapl/legacy/migration.py" } ]
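For context, the module listed above is driven through a single entry point, `migrate(obj=appconfs, interativo=True)`. A minimal usage sketch based only on the definitions visible in the listing; `Protocolo` is picked arbitrarily as an example target, and the idea that `interativo=False` skips the confirmation prompt is an assumption inferred from the `[s/n]` prompt shown at the top of `DataMigrator.migrate`:

```python
# Usage sketch for sapl/legacy/migration.py (assumptions noted above).
from sapl.legacy.migration import Protocolo, migrate  # Protocolo is already in the module's namespace

# Migrate every registered legacy app (the default target). Note that, as the
# listing shows, DataMigrator.migrate flushes the default database first.
migrate()

# Migrate a single model, assuming interativo=False bypasses the [s/n] prompt.
migrate(obj=Protocolo, interativo=False)
```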
diff --git a/sapl/legacy/migration.py b/sapl/legacy/migration.py index d34cf935a..e9d4f73b1 100644 --- a/sapl/legacy/migration.py +++ b/sapl/legacy/migration.py @@ -259,6 +259,9 @@ def excluir_registrovotacao_duplicados(): def delete_old(legacy_model, cols_values): + # ajuste necessário por conta de cósigos html em txt_expediente + if legacy_model.__name__ == 'ExpedienteSessaoPlenaria': + cols_values.pop('txt_expediente') def eq_clause(col, value): if value is None:
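The (truncated) patch above adds a guard to `delete_old`; its Portuguese comment reads roughly "adjustment needed because of HTML codes in txt_expediente". Presumably the raw markup stored in that column cannot be matched reliably when looking up the legacy row to delete, so it is dropped from the lookup values. The added lines in isolation, with the unchanged remainder of the helper elided:

```python
def delete_old(legacy_model, cols_values):
    # Adjustment needed because of HTML codes in txt_expediente (translated
    # from the patch comment): drop that column from the lookup values.
    if legacy_model.__name__ == 'ExpedienteSessaoPlenaria':
        cols_values.pop('txt_expediente')
    # ... the rest of delete_old (eq_clause and the actual deletion) is unchanged.
```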
iterative__dvc-2364
status: change nothing to reproduce message If I use DVC only to version data/models and don't care about pipelines, this message: `Pipelines are up to date. Nothing to reproduce.` looks really strange. Let's change it to something more generic: `Data and pipelines are up to date.` or something similar
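The files and diff that follow show the one-line change; the user-visible effect is simply that an in-sync workspace reports the new wording. Roughly (other log lines may still appear depending on configuration):

```console
$ dvc status
Data and pipelines are up to date.
```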
[ { "content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Pipelines are up to date. Nothing to reproduce.\"\n\n def _normalize(self, s):\n s += \":\"\n assert len(s) < self.STATUS_LEN\n return s + (self.STATUS_LEN - len(s)) * \" \"\n\n def _show(self, status, indent=0):\n ind = indent * self.STATUS_INDENT\n\n if isinstance(status, str):\n logger.info(\"{}{}\".format(ind, status))\n return\n\n if isinstance(status, list):\n for entry in status:\n self._show(entry, indent)\n return\n\n assert isinstance(status, dict)\n\n for key, value in status.items():\n if isinstance(value, str):\n logger.info(\"{}{}{}\".format(ind, self._normalize(value), key))\n elif value:\n logger.info(\"{}{}:\".format(ind, key))\n self._show(value, indent + 1)\n\n def run(self):\n indent = 1 if self.args.cloud else 0\n try:\n st = self.repo.status(\n targets=self.args.targets,\n jobs=self.args.jobs,\n cloud=self.args.cloud,\n remote=self.args.remote,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n with_deps=self.args.with_deps,\n )\n if st:\n if self.args.quiet:\n return 1\n else:\n self._show(st, indent)\n else:\n logger.info(self.UP_TO_DATE_MSG)\n\n except Exception:\n logger.exception(\"failed to obtain data status\")\n return 1\n return 0\n", "path": "dvc/command/status.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Data and pipelines are up to date.\"\n\n def _normalize(self, s):\n s += \":\"\n assert len(s) < self.STATUS_LEN\n return s + (self.STATUS_LEN - len(s)) * \" \"\n\n def _show(self, status, indent=0):\n ind = indent * self.STATUS_INDENT\n\n if isinstance(status, str):\n logger.info(\"{}{}\".format(ind, status))\n return\n\n if isinstance(status, list):\n for entry in status:\n self._show(entry, indent)\n return\n\n assert isinstance(status, dict)\n\n for key, value in status.items():\n if isinstance(value, str):\n logger.info(\"{}{}{}\".format(ind, self._normalize(value), key))\n elif value:\n logger.info(\"{}{}:\".format(ind, key))\n self._show(value, indent + 1)\n\n def run(self):\n indent = 1 if self.args.cloud else 0\n try:\n st = self.repo.status(\n targets=self.args.targets,\n jobs=self.args.jobs,\n cloud=self.args.cloud,\n remote=self.args.remote,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n with_deps=self.args.with_deps,\n )\n if st:\n if self.args.quiet:\n return 1\n else:\n self._show(st, indent)\n else:\n logger.info(self.UP_TO_DATE_MSG)\n\n except Exception:\n logger.exception(\"failed to obtain data status\")\n return 1\n return 0\n", "path": "dvc/command/status.py" } ]
diff --git a/dvc/command/status.py b/dvc/command/status.py index ecb6082f25..9f6c90ab0a 100644 --- a/dvc/command/status.py +++ b/dvc/command/status.py @@ -12,7 +12,7 @@ class CmdDataStatus(CmdDataBase): STATUS_LEN = 20 STATUS_INDENT = "\t" - UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce." + UP_TO_DATE_MSG = "Data and pipelines are up to date." def _normalize(self, s): s += ":"
pytorch__vision-6883
deepcopying retinanet fails ### 🐛 Describe the bug Deepcoping retinanet fails ```py from torchvision.models.detection.retinanet import retinanet_resnet50_fpn from torchvision.models.resnet import ResNet50_Weights from copy import deepcopy from torch import nn class RetinaNet(nn.Module): def __init__(self): super().__init__() self.weights_backbone = ResNet50_Weights.IMAGENET1K_V1 self.model = retinanet_resnet50_fpn(weights=None, weights_backbone=self.weights_backbone) if __name__ == '__main__': deepcopy(RetinaNet()) ``` Error: ```console le "/Users/goku/Desktop/work/repos/lightning-bolts/build/tmp2.py", line 15, in <module> deepcopy(RetinaNet()) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 270, in _reconstruct state = deepcopy(state, memo) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 146, in deepcopy y = copier(x, memo) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 230, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/copy.py", line 264, in _reconstruct y = func(*args) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/enum.py", line 384, in __call__ return cls.__new__(cls, value) File "/Users/goku/miniconda3/envs/lit_bolts/lib/python3.9/enum.py", line 702, in __new__ raise ve_exc ValueError: Weights(url='https://download.pytorch.org/models/resnet50-0676ba61.pth', transforms=functools.partial(<class 'torchvision.transforms._presets.ImageClassification'>, crop_size=224), meta={'min_size': (1, 1), 'categories': ['tench', 'goldfish', 'great white shark', ...}}, '_docs': 'These weights reproduce closely the results of the paper using a simple training recipe.'}) is not a valid ResNet50_Weights ``` In short this fails: ```python from copy import deepcopy from torchvision.models.resnet import ResNet50_Weights deepcopy(ResNet50_Weights.IMAGENET1K_V1) ``` ### Versions ```console Collecting environment information... PyTorch version: 1.13.0 Is debug build: False CUDA used to build PyTorch: None ROCM used to build PyTorch: N/A OS: macOS 11.6 (x86_64) GCC version: Could not collect Clang version: 13.0.0 (clang-1300.0.29.3) CMake version: version 3.21.3 Libc version: N/A Python version: 3.9.13 (main, Oct 13 2022, 16:12:30) [Clang 12.0.0 ] (64- Python platform: macOS-10.16-x86_64-i386-64bit Is CUDA available: False CUDA runtime version: No CUDA CUDA_MODULE_LOADING set to: N/A GPU models and configuration: No CUDA Nvidia driver version: No CUDA cuDNN version: No CUDA HIP runtime version: N/A MIOpen runtime version: N/A Is XNNPACK available: True Versions of relevant libraries: [pip3] numpy==1.23.4 [pip3] pytorch-lightning==1.8.0rc1 [pip3] torch==1.13.0 [pip3] torchmetrics==0.10.1 [pip3] torchvision==0.14.0 [conda] numpy 1.23.4 pypi_0 pypi [conda] pytorch-lightning 1.8.0rc1 pypi_0 pypi [conda] torch 1.13.0 pypi_0 pypi [conda] torchmetrics 0.10.1 pypi_0 pypi [conda] torchvision 0.14.0 pypi_0 pypi ```
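The failure happens because `deepcopy`, finding no `__deepcopy__` on the enum, falls back to the reduce/reconstruct path and re-creates the member via `ResNet50_Weights(<deep-copied Weights value>)`; the copied `Weights` dataclass (its `functools.partial` transform in particular) no longer compares equal to any member's value, hence the `ValueError`. The patch in the files below fixes this by letting `WeightsEnum` members deep-copy as themselves. A self-contained sketch of the same mechanism, with a made-up `DemoWeights` enum and example URL standing in for the real weight enums:

```python
import copy
from dataclasses import dataclass
from enum import Enum
from functools import partial


@dataclass
class Weights:
    url: str
    transforms: object


class DemoWeights(Enum):
    # Made-up entry standing in for e.g. ResNet50_Weights.IMAGENET1K_V1.
    V1 = Weights(url="https://example.invalid/w.pth", transforms=partial(str))

    def __deepcopy__(self, memodict=None):
        # Enum members are singletons: return the member itself rather than
        # letting copy reconstruct it from a deep-copied Weights value, whose
        # functools.partial no longer compares equal to the original.
        return self


assert copy.deepcopy(DemoWeights.V1) is DemoWeights.V1
```

Removing the `__deepcopy__` override from this sketch should reproduce the reported `ValueError`, since the reconstructed lookup `DemoWeights(copied_value)` then finds no matching member.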
[ { "content": "import importlib\nimport inspect\nimport sys\nfrom dataclasses import dataclass, fields\nfrom inspect import signature\nfrom types import ModuleType\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, TypeVar, Union\n\nfrom torch import nn\n\nfrom torchvision._utils import StrEnum\n\nfrom .._internally_replaced_utils import load_state_dict_from_url\n\n\n__all__ = [\"WeightsEnum\", \"Weights\", \"get_model\", \"get_model_builder\", \"get_model_weights\", \"get_weight\", \"list_models\"]\n\n\n@dataclass\nclass Weights:\n \"\"\"\n This class is used to group important attributes associated with the pre-trained weights.\n\n Args:\n url (str): The location where we find the weights.\n transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)\n needed to use the model. The reason we attach a constructor method rather than an already constructed\n object is because the specific object might have memory and thus we want to delay initialization until\n needed.\n meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be\n informative attributes (for example the number of parameters/flops, recipe link/methods used in training\n etc), configuration parameters (for example the `num_classes`) needed to construct the model or important\n meta-data (for example the `classes` of a classification model) needed to use the model.\n \"\"\"\n\n url: str\n transforms: Callable\n meta: Dict[str, Any]\n\n\nclass WeightsEnum(StrEnum):\n \"\"\"\n This class is the parent class of all model weights. Each model building method receives an optional `weights`\n parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type\n `Weights`.\n\n Args:\n value (Weights): The data class entry with the weight information.\n \"\"\"\n\n def __init__(self, value: Weights):\n self._value_ = value\n\n @classmethod\n def verify(cls, obj: Any) -> Any:\n if obj is not None:\n if type(obj) is str:\n obj = cls.from_str(obj.replace(cls.__name__ + \".\", \"\"))\n elif not isinstance(obj, cls):\n raise TypeError(\n f\"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}.\"\n )\n return obj\n\n def get_state_dict(self, progress: bool) -> Mapping[str, Any]:\n return load_state_dict_from_url(self.url, progress=progress)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}.{self._name_}\"\n\n def __getattr__(self, name):\n # Be able to fetch Weights attributes directly\n for f in fields(Weights):\n if f.name == name:\n return object.__getattribute__(self.value, name)\n return super().__getattr__(name)\n\n\ndef get_weight(name: str) -> WeightsEnum:\n \"\"\"\n Gets the weights enum value by its full name. Example: \"ResNet50_Weights.IMAGENET1K_V1\"\n\n .. 
betastatus:: function\n\n Args:\n name (str): The name of the weight enum entry.\n\n Returns:\n WeightsEnum: The requested weight enum.\n \"\"\"\n try:\n enum_name, value_name = name.split(\".\")\n except ValueError:\n raise ValueError(f\"Invalid weight name provided: '{name}'.\")\n\n base_module_name = \".\".join(sys.modules[__name__].__name__.split(\".\")[:-1])\n base_module = importlib.import_module(base_module_name)\n model_modules = [base_module] + [\n x[1] for x in inspect.getmembers(base_module, inspect.ismodule) if x[1].__file__.endswith(\"__init__.py\")\n ]\n\n weights_enum = None\n for m in model_modules:\n potential_class = m.__dict__.get(enum_name, None)\n if potential_class is not None and issubclass(potential_class, WeightsEnum):\n weights_enum = potential_class\n break\n\n if weights_enum is None:\n raise ValueError(f\"The weight enum '{enum_name}' for the specific method couldn't be retrieved.\")\n\n return weights_enum.from_str(value_name)\n\n\ndef get_model_weights(name: Union[Callable, str]) -> WeightsEnum:\n \"\"\"\n Retuns the weights enum class associated to the given model.\n\n .. betastatus:: function\n\n Args:\n name (callable or str): The model builder function or the name under which it is registered.\n\n Returns:\n weights_enum (WeightsEnum): The weights enum class associated with the model.\n \"\"\"\n model = get_model_builder(name) if isinstance(name, str) else name\n return _get_enum_from_fn(model)\n\n\ndef _get_enum_from_fn(fn: Callable) -> WeightsEnum:\n \"\"\"\n Internal method that gets the weight enum of a specific model builder method.\n\n Args:\n fn (Callable): The builder method used to create the model.\n weight_name (str): The name of the weight enum entry of the specific model.\n Returns:\n WeightsEnum: The requested weight enum.\n \"\"\"\n sig = signature(fn)\n if \"weights\" not in sig.parameters:\n raise ValueError(\"The method is missing the 'weights' argument.\")\n\n ann = signature(fn).parameters[\"weights\"].annotation\n weights_enum = None\n if isinstance(ann, type) and issubclass(ann, WeightsEnum):\n weights_enum = ann\n else:\n # handle cases like Union[Optional, T]\n # TODO: Replace ann.__args__ with typing.get_args(ann) after python >= 3.8\n for t in ann.__args__: # type: ignore[union-attr]\n if isinstance(t, type) and issubclass(t, WeightsEnum):\n weights_enum = t\n break\n\n if weights_enum is None:\n raise ValueError(\n \"The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct.\"\n )\n\n return cast(WeightsEnum, weights_enum)\n\n\nM = TypeVar(\"M\", bound=nn.Module)\n\nBUILTIN_MODELS = {}\n\n\ndef register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:\n def wrapper(fn: Callable[..., M]) -> Callable[..., M]:\n key = name if name is not None else fn.__name__\n if key in BUILTIN_MODELS:\n raise ValueError(f\"An entry is already registered under the name '{key}'.\")\n BUILTIN_MODELS[key] = fn\n return fn\n\n return wrapper\n\n\ndef list_models(module: Optional[ModuleType] = None) -> List[str]:\n \"\"\"\n Returns a list with the names of registered models.\n\n .. 
betastatus:: function\n\n Args:\n module (ModuleType, optional): The module from which we want to extract the available models.\n\n Returns:\n models (list): A list with the names of available models.\n \"\"\"\n models = [\n k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__.rsplit(\".\", 1)[0] == module.__name__\n ]\n return sorted(models)\n\n\ndef get_model_builder(name: str) -> Callable[..., nn.Module]:\n \"\"\"\n Gets the model name and returns the model builder method.\n\n .. betastatus:: function\n\n Args:\n name (str): The name under which the model is registered.\n\n Returns:\n fn (Callable): The model builder method.\n \"\"\"\n name = name.lower()\n try:\n fn = BUILTIN_MODELS[name]\n except KeyError:\n raise ValueError(f\"Unknown model {name}\")\n return fn\n\n\ndef get_model(name: str, **config: Any) -> nn.Module:\n \"\"\"\n Gets the model name and configuration and returns an instantiated model.\n\n .. betastatus:: function\n\n Args:\n name (str): The name under which the model is registered.\n **config (Any): parameters passed to the model builder method.\n\n Returns:\n model (nn.Module): The initialized model.\n \"\"\"\n fn = get_model_builder(name)\n return fn(**config)\n", "path": "torchvision/models/_api.py" } ]
[ { "content": "import importlib\nimport inspect\nimport sys\nfrom dataclasses import dataclass, fields\nfrom inspect import signature\nfrom types import ModuleType\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, TypeVar, Union\n\nfrom torch import nn\n\nfrom torchvision._utils import StrEnum\n\nfrom .._internally_replaced_utils import load_state_dict_from_url\n\n\n__all__ = [\"WeightsEnum\", \"Weights\", \"get_model\", \"get_model_builder\", \"get_model_weights\", \"get_weight\", \"list_models\"]\n\n\n@dataclass\nclass Weights:\n \"\"\"\n This class is used to group important attributes associated with the pre-trained weights.\n\n Args:\n url (str): The location where we find the weights.\n transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)\n needed to use the model. The reason we attach a constructor method rather than an already constructed\n object is because the specific object might have memory and thus we want to delay initialization until\n needed.\n meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be\n informative attributes (for example the number of parameters/flops, recipe link/methods used in training\n etc), configuration parameters (for example the `num_classes`) needed to construct the model or important\n meta-data (for example the `classes` of a classification model) needed to use the model.\n \"\"\"\n\n url: str\n transforms: Callable\n meta: Dict[str, Any]\n\n\nclass WeightsEnum(StrEnum):\n \"\"\"\n This class is the parent class of all model weights. Each model building method receives an optional `weights`\n parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type\n `Weights`.\n\n Args:\n value (Weights): The data class entry with the weight information.\n \"\"\"\n\n def __init__(self, value: Weights):\n self._value_ = value\n\n @classmethod\n def verify(cls, obj: Any) -> Any:\n if obj is not None:\n if type(obj) is str:\n obj = cls.from_str(obj.replace(cls.__name__ + \".\", \"\"))\n elif not isinstance(obj, cls):\n raise TypeError(\n f\"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}.\"\n )\n return obj\n\n def get_state_dict(self, progress: bool) -> Mapping[str, Any]:\n return load_state_dict_from_url(self.url, progress=progress)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}.{self._name_}\"\n\n def __getattr__(self, name):\n # Be able to fetch Weights attributes directly\n for f in fields(Weights):\n if f.name == name:\n return object.__getattribute__(self.value, name)\n return super().__getattr__(name)\n\n def __deepcopy__(self, memodict=None):\n return self\n\n\ndef get_weight(name: str) -> WeightsEnum:\n \"\"\"\n Gets the weights enum value by its full name. Example: \"ResNet50_Weights.IMAGENET1K_V1\"\n\n .. 
betastatus:: function\n\n Args:\n name (str): The name of the weight enum entry.\n\n Returns:\n WeightsEnum: The requested weight enum.\n \"\"\"\n try:\n enum_name, value_name = name.split(\".\")\n except ValueError:\n raise ValueError(f\"Invalid weight name provided: '{name}'.\")\n\n base_module_name = \".\".join(sys.modules[__name__].__name__.split(\".\")[:-1])\n base_module = importlib.import_module(base_module_name)\n model_modules = [base_module] + [\n x[1] for x in inspect.getmembers(base_module, inspect.ismodule) if x[1].__file__.endswith(\"__init__.py\")\n ]\n\n weights_enum = None\n for m in model_modules:\n potential_class = m.__dict__.get(enum_name, None)\n if potential_class is not None and issubclass(potential_class, WeightsEnum):\n weights_enum = potential_class\n break\n\n if weights_enum is None:\n raise ValueError(f\"The weight enum '{enum_name}' for the specific method couldn't be retrieved.\")\n\n return weights_enum.from_str(value_name)\n\n\ndef get_model_weights(name: Union[Callable, str]) -> WeightsEnum:\n \"\"\"\n Retuns the weights enum class associated to the given model.\n\n .. betastatus:: function\n\n Args:\n name (callable or str): The model builder function or the name under which it is registered.\n\n Returns:\n weights_enum (WeightsEnum): The weights enum class associated with the model.\n \"\"\"\n model = get_model_builder(name) if isinstance(name, str) else name\n return _get_enum_from_fn(model)\n\n\ndef _get_enum_from_fn(fn: Callable) -> WeightsEnum:\n \"\"\"\n Internal method that gets the weight enum of a specific model builder method.\n\n Args:\n fn (Callable): The builder method used to create the model.\n weight_name (str): The name of the weight enum entry of the specific model.\n Returns:\n WeightsEnum: The requested weight enum.\n \"\"\"\n sig = signature(fn)\n if \"weights\" not in sig.parameters:\n raise ValueError(\"The method is missing the 'weights' argument.\")\n\n ann = signature(fn).parameters[\"weights\"].annotation\n weights_enum = None\n if isinstance(ann, type) and issubclass(ann, WeightsEnum):\n weights_enum = ann\n else:\n # handle cases like Union[Optional, T]\n # TODO: Replace ann.__args__ with typing.get_args(ann) after python >= 3.8\n for t in ann.__args__: # type: ignore[union-attr]\n if isinstance(t, type) and issubclass(t, WeightsEnum):\n weights_enum = t\n break\n\n if weights_enum is None:\n raise ValueError(\n \"The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct.\"\n )\n\n return cast(WeightsEnum, weights_enum)\n\n\nM = TypeVar(\"M\", bound=nn.Module)\n\nBUILTIN_MODELS = {}\n\n\ndef register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:\n def wrapper(fn: Callable[..., M]) -> Callable[..., M]:\n key = name if name is not None else fn.__name__\n if key in BUILTIN_MODELS:\n raise ValueError(f\"An entry is already registered under the name '{key}'.\")\n BUILTIN_MODELS[key] = fn\n return fn\n\n return wrapper\n\n\ndef list_models(module: Optional[ModuleType] = None) -> List[str]:\n \"\"\"\n Returns a list with the names of registered models.\n\n .. 
betastatus:: function\n\n Args:\n module (ModuleType, optional): The module from which we want to extract the available models.\n\n Returns:\n models (list): A list with the names of available models.\n \"\"\"\n models = [\n k for k, v in BUILTIN_MODELS.items() if module is None or v.__module__.rsplit(\".\", 1)[0] == module.__name__\n ]\n return sorted(models)\n\n\ndef get_model_builder(name: str) -> Callable[..., nn.Module]:\n \"\"\"\n Gets the model name and returns the model builder method.\n\n .. betastatus:: function\n\n Args:\n name (str): The name under which the model is registered.\n\n Returns:\n fn (Callable): The model builder method.\n \"\"\"\n name = name.lower()\n try:\n fn = BUILTIN_MODELS[name]\n except KeyError:\n raise ValueError(f\"Unknown model {name}\")\n return fn\n\n\ndef get_model(name: str, **config: Any) -> nn.Module:\n \"\"\"\n Gets the model name and configuration and returns an instantiated model.\n\n .. betastatus:: function\n\n Args:\n name (str): The name under which the model is registered.\n **config (Any): parameters passed to the model builder method.\n\n Returns:\n model (nn.Module): The initialized model.\n \"\"\"\n fn = get_model_builder(name)\n return fn(**config)\n", "path": "torchvision/models/_api.py" } ]
diff --git a/test/test_extended_models.py b/test/test_extended_models.py index c467564c9c4..2cd8a568113 100644 --- a/test/test_extended_models.py +++ b/test/test_extended_models.py @@ -1,3 +1,4 @@ +import copy import os import pytest @@ -59,6 +60,25 @@ def test_get_model_weights(name, weight): assert models.get_model_weights(name) == weight [email protected]("copy_fn", [copy.copy, copy.deepcopy]) [email protected]( + "name", + [ + "resnet50", + "retinanet_resnet50_fpn_v2", + "raft_large", + "quantized_resnet50", + "lraspp_mobilenet_v3_large", + "mvit_v1_b", + ], +) +def test_weights_copyable(copy_fn, name): + model_weights = models.get_model_weights(name) + for weights in list(model_weights): + copied_weights = copy_fn(weights) + assert copied_weights is weights + + @pytest.mark.parametrize( "module", [models, models.detection, models.quantization, models.segmentation, models.video, models.optical_flow] ) diff --git a/torchvision/models/_api.py b/torchvision/models/_api.py index 52ac070e6d3..d550594c5b7 100644 --- a/torchvision/models/_api.py +++ b/torchvision/models/_api.py @@ -75,6 +75,9 @@ def __getattr__(self, name): return object.__getattribute__(self.value, name) return super().__getattr__(name) + def __deepcopy__(self, memodict=None): + return self + def get_weight(name: str) -> WeightsEnum: """
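With the patched `WeightsEnum` in place, the new test above reduces to the following check, which also covers the one-line reproduction from the report (it assumes a torchvision build that includes the `__deepcopy__` change):

```python
import copy

from torchvision.models.resnet import ResNet50_Weights

w = ResNet50_Weights.IMAGENET1K_V1
# Both shallow and deep copies should resolve to the very same enum member.
assert copy.copy(w) is w
assert copy.deepcopy(w) is w
```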
ManimCommunity__manim-907
Sound in manimce
I want to add sound to my video in manimce, just as in 3b1b's manim. I use the following code:
`self.add_sound(" sound file name in assets folder", gain = value)`
But it gives an error in manimce:
![20210102172136](https://user-images.githubusercontent.com/76893489/103471739-ab14a200-4da9-11eb-978d-74ff95788ee5.jpg)
I think manimce may expect a different syntax. Please resolve this!
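In the community edition the method exists with the same shape, `Scene.add_sound(sound_file, time_offset=0, gain=None, **kwargs)`, as the `manim/scene/scene.py` listing below shows, so the call syntax itself is not the issue. One plausible reading of that listing is that `add_sound` computes `time = self.time + time_offset` while `Scene` never sets `self.time`, which would raise an `AttributeError` regardless of the arguments passed. A minimal usage sketch of the intended call, assuming a working `add_sound`; the scene name and `click.wav` are placeholders:

```python
from manim import Scene, Text, Write


class SoundExample(Scene):
    def construct(self):
        # "click.wav" is a placeholder: use a real file from your assets
        # folder or pass a full path. gain is forwarded to the file writer.
        self.add_sound("click.wav", gain=-10)
        self.play(Write(Text("Hello World!")))
```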
[ { "content": "\"\"\"Basic canvas for animations.\"\"\"\n\n\n__all__ = [\"Scene\"]\n\n\nimport inspect\nimport random\nimport warnings\nimport platform\nimport copy\nimport string\nimport types\n\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom .. import config, logger\nfrom ..animation.animation import Animation, Wait\nfrom ..animation.transform import MoveToTarget, _MethodAnimation\nfrom ..camera.camera import Camera\nfrom ..constants import *\nfrom ..container import Container\nfrom ..mobject.mobject import Mobject, _AnimationBuilder\nfrom ..utils.iterables import list_update, list_difference_update\nfrom ..utils.family import extract_mobject_family_members\nfrom ..renderer.cairo_renderer import CairoRenderer\nfrom ..utils.exceptions import EndSceneEarlyException\n\n\nclass Scene(Container):\n \"\"\"A Scene is the canvas of your animation.\n\n The primary role of :class:`Scene` is to provide the user with tools to manage\n mobjects and animations. Generally speaking, a manim script consists of a class\n that derives from :class:`Scene` whose :meth:`Scene.construct` method is overridden\n by the user's code.\n\n Mobjects are displayed on screen by calling :meth:`Scene.add` and removed from\n screen by calling :meth:`Scene.remove`. All mobjects currently on screen are kept\n in :attr:`Scene.mobjects`. Animations are played by calling :meth:`Scene.play`.\n\n A :class:`Scene` is rendered internally by calling :meth:`Scene.render`. This in\n turn calls :meth:`Scene.setup`, :meth:`Scene.construct`, and\n :meth:`Scene.tear_down`, in that order.\n\n It is not recommended to override the ``__init__`` method in user Scenes. For code\n that should be ran before a Scene is rendered, use :meth:`Scene.setup` instead.\n\n\n Examples\n --------\n Override the :meth:`Scene.construct` method with your code.\n\n .. code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n \"\"\"\n\n def __init__(\n self,\n renderer=None,\n camera_class=Camera,\n always_update_mobjects=False,\n random_seed=0,\n **kwargs,\n ):\n self.camera_class = camera_class\n self.always_update_mobjects = always_update_mobjects\n self.random_seed = random_seed\n\n self.animations = None\n self.stop_condition = None\n self.moving_mobjects = None\n self.static_mobjects = None\n self.time_progression = None\n self.duration = None\n self.last_t = None\n\n if renderer is None:\n self.renderer = CairoRenderer(\n camera_class=self.camera_class,\n skip_animations=kwargs.get(\"skip_animations\", False),\n )\n else:\n self.renderer = renderer\n self.renderer.init_scene(self)\n\n self.mobjects = []\n # TODO, remove need for foreground mobjects\n self.foreground_mobjects = []\n if self.random_seed is not None:\n random.seed(self.random_seed)\n np.random.seed(self.random_seed)\n\n Container.__init__(self, **kwargs)\n\n @property\n def camera(self):\n return self.renderer.camera\n\n def __deepcopy__(self, clone_from_id):\n cls = self.__class__\n result = cls.__new__(cls)\n clone_from_id[id(self)] = result\n for k, v in self.__dict__.items():\n if k in [\"renderer\", \"time_progression\"]:\n continue\n if k == \"camera_class\":\n setattr(result, k, v)\n setattr(result, k, copy.deepcopy(v, clone_from_id))\n\n # Update updaters\n for mobject in self.mobjects:\n cloned_updaters = []\n for updater in mobject.updaters:\n # Make the cloned updater use the cloned Mobjects as free variables\n # rather than the original ones. 
Analyzing function bytecode with the\n # dis module will help in understanding this.\n # https://docs.python.org/3/library/dis.html\n # TODO: Do the same for function calls recursively.\n free_variable_map = inspect.getclosurevars(updater).nonlocals\n cloned_co_freevars = []\n cloned_closure = []\n for i, free_variable_name in enumerate(updater.__code__.co_freevars):\n free_variable_value = free_variable_map[free_variable_name]\n\n # If the referenced variable has not been cloned, raise.\n if id(free_variable_value) not in clone_from_id:\n raise Exception(\n f\"{free_variable_name} is referenced from an updater \"\n \"but is not an attribute of the Scene, which isn't \"\n \"allowed.\"\n )\n\n # Add the cloned object's name to the free variable list.\n cloned_co_freevars.append(free_variable_name)\n\n # Add a cell containing the cloned object's reference to the\n # closure list.\n cloned_closure.append(\n types.CellType(clone_from_id[id(free_variable_value)])\n )\n\n cloned_updater = types.FunctionType(\n updater.__code__.replace(co_freevars=tuple(cloned_co_freevars)),\n updater.__globals__,\n updater.__name__,\n updater.__defaults__,\n tuple(cloned_closure),\n )\n cloned_updaters.append(cloned_updater)\n clone_from_id[id(mobject)].updaters = cloned_updaters\n return result\n\n def render(self):\n \"\"\"\n Render this Scene.\n \"\"\"\n self.setup()\n try:\n self.construct()\n except EndSceneEarlyException:\n pass\n self.tear_down()\n # We have to reset these settings in case of multiple renders.\n self.renderer.scene_finished(self)\n logger.info(\n f\"Rendered {str(self)}\\nPlayed {self.renderer.num_plays} animations\"\n )\n\n def setup(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common setup\n involved before the construct method is called.\n \"\"\"\n pass\n\n def tear_down(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common method\n to be invoked before the scene ends.\n \"\"\"\n pass\n\n def construct(self):\n \"\"\"Add content to the Scene.\n\n From within :meth:`Scene.construct`, display mobjects on screen by calling\n :meth:`Scene.add` and remove them from screen by calling :meth:`Scene.remove`.\n All mobjects currently on screen are kept in :attr:`Scene.mobjects`. Play\n animations by calling :meth:`Scene.play`.\n\n Notes\n -----\n Initialization code should go in :meth:`Scene.setup`. Termination code should\n go in :meth:`Scene.tear_down`.\n\n Examples\n --------\n A typical manim script includes a class derived from :class:`Scene` with an\n overridden :meth:`Scene.contruct` method:\n\n .. code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n See Also\n --------\n :meth:`Scene.setup`\n :meth:`Scene.render`\n :meth:`Scene.tear_down`\n\n \"\"\"\n pass # To be implemented in subclasses\n\n def __str__(self):\n return self.__class__.__name__\n\n def get_attrs(self, *keys):\n \"\"\"\n Gets attributes of a scene given the attribute's identifier/name.\n\n Parameters\n ----------\n *keys : str\n Name(s) of the argument(s) to return the attribute of.\n\n Returns\n -------\n list\n List of attributes of the passed identifiers.\n \"\"\"\n return [getattr(self, key) for key in keys]\n\n def update_mobjects(self, dt):\n \"\"\"\n Begins updating all mobjects in the Scene.\n\n Parameters\n ----------\n dt: int or float\n Change in time between updates. 
Defaults (mostly) to 1/frames_per_second\n \"\"\"\n for mobject in self.mobjects:\n mobject.update(dt)\n\n def should_update_mobjects(self):\n \"\"\"\n Returns True if any mobject in Scene is being updated\n or if the scene has always_update_mobjects set to true.\n\n Returns\n -------\n bool\n \"\"\"\n return self.always_update_mobjects or any(\n [mob.has_time_based_updater() for mob in self.get_mobject_family_members()]\n )\n\n def get_top_level_mobjects(self):\n \"\"\"\n Returns all mobjects which are not submobjects.\n\n Returns\n -------\n list\n List of top level mobjects.\n \"\"\"\n # Return only those which are not in the family\n # of another mobject from the scene\n families = [m.get_family() for m in self.mobjects]\n\n def is_top_level(mobject):\n num_families = sum([(mobject in family) for family in families])\n return num_families == 1\n\n return list(filter(is_top_level, self.mobjects))\n\n def get_mobject_family_members(self):\n \"\"\"\n Returns list of family-members of all mobjects in scene.\n If a Circle() and a VGroup(Rectangle(),Triangle()) were added,\n it returns not only the Circle(), Rectangle() and Triangle(), but\n also the VGroup() object.\n\n Returns\n -------\n list\n List of mobject family members.\n \"\"\"\n return extract_mobject_family_members(\n self.mobjects, use_z_index=self.renderer.camera.use_z_index\n )\n\n def add(self, *mobjects):\n \"\"\"\n Mobjects will be displayed, from background to\n foreground in the order with which they are added.\n\n Parameters\n ---------\n *mobjects : Mobject\n Mobjects to add.\n\n Returns\n -------\n Scene\n The same scene after adding the Mobjects in.\n\n \"\"\"\n mobjects = [*mobjects, *self.foreground_mobjects]\n self.restructure_mobjects(to_remove=mobjects)\n self.mobjects += mobjects\n if self.moving_mobjects:\n self.restructure_mobjects(\n to_remove=mobjects, mobject_list_name=\"moving_mobjects\"\n )\n self.moving_mobjects += mobjects\n return self\n\n def add_mobjects_from_animations(self, animations):\n\n curr_mobjects = self.get_mobject_family_members()\n for animation in animations:\n # Anything animated that's not already in the\n # scene gets added to the scene\n mob = animation.mobject\n if mob is not None and mob not in curr_mobjects:\n self.add(mob)\n curr_mobjects += mob.get_family()\n\n def remove(self, *mobjects):\n \"\"\"\n Removes mobjects in the passed list of mobjects\n from the scene and the foreground, by removing them\n from \"mobjects\" and \"foreground_mobjects\"\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobjects to remove.\n \"\"\"\n for list_name in \"mobjects\", \"foreground_mobjects\":\n self.restructure_mobjects(mobjects, list_name, False)\n return self\n\n def restructure_mobjects(\n self, to_remove, mobject_list_name=\"mobjects\", extract_families=True\n ):\n \"\"\"\n tl:wr\n If your scene has a Group(), and you removed a mobject from the Group,\n this dissolves the group and puts the rest of the mobjects directly\n in self.mobjects or self.foreground_mobjects.\n\n In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one\n of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects\n will be edited to contain other submobjects, but not m1, e.g. 
it will now\n insert m2 and m3 to where the group once was.\n\n Parameters\n ----------\n to_remove : Mobject\n The Mobject to remove.\n\n mobject_list_name : str, optional\n The list of mobjects (\"mobjects\", \"foreground_mobjects\" etc) to remove from.\n\n extract_families : bool, optional\n Whether the mobject's families should be recursively extracted.\n\n Returns\n -------\n Scene\n The Scene mobject with restructured Mobjects.\n \"\"\"\n if extract_families:\n to_remove = extract_mobject_family_members(\n to_remove, use_z_index=self.renderer.camera.use_z_index\n )\n _list = getattr(self, mobject_list_name)\n new_list = self.get_restructured_mobject_list(_list, to_remove)\n setattr(self, mobject_list_name, new_list)\n return self\n\n def get_restructured_mobject_list(self, mobjects, to_remove):\n \"\"\"\n Given a list of mobjects and a list of mobjects to be removed, this\n filters out the removable mobjects from the list of mobjects.\n\n Parameters\n ----------\n\n mobjects : list\n The Mobjects to check.\n\n to_remove : list\n The list of mobjects to remove.\n\n Returns\n -------\n list\n The list of mobjects with the mobjects to remove removed.\n \"\"\"\n\n new_mobjects = []\n\n def add_safe_mobjects_from_list(list_to_examine, set_to_remove):\n for mob in list_to_examine:\n if mob in set_to_remove:\n continue\n intersect = set_to_remove.intersection(mob.get_family())\n if intersect:\n add_safe_mobjects_from_list(mob.submobjects, intersect)\n else:\n new_mobjects.append(mob)\n\n add_safe_mobjects_from_list(mobjects, set(to_remove))\n return new_mobjects\n\n # TODO, remove this, and calls to this\n def add_foreground_mobjects(self, *mobjects):\n \"\"\"\n Adds mobjects to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n *mobjects : Mobject\n The Mobjects to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects added.\n \"\"\"\n self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)\n self.add(*mobjects)\n return self\n\n def add_foreground_mobject(self, mobject):\n \"\"\"\n Adds a single mobject to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The Mobject to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject added.\n \"\"\"\n return self.add_foreground_mobjects(mobject)\n\n def remove_foreground_mobjects(self, *to_remove):\n \"\"\"\n Removes mobjects from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n *to_remove : Mobject\n The mobject(s) to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects removed.\n \"\"\"\n self.restructure_mobjects(to_remove, \"foreground_mobjects\")\n return self\n\n def remove_foreground_mobject(self, mobject):\n \"\"\"\n Removes a single mobject from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The mobject to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject removed.\n \"\"\"\n return self.remove_foreground_mobjects(mobject)\n\n def bring_to_front(self, *mobjects):\n \"\"\"\n Adds the passed mobjects to the scene again,\n pushing them to he front of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to bring to the front of the scene.\n\n Returns\n ------\n Scene\n The 
Scene, with the mobjects brought to the front\n of the scene.\n \"\"\"\n self.add(*mobjects)\n return self\n\n def bring_to_back(self, *mobjects):\n \"\"\"\n Removes the mobject from the scene and\n adds them to the back of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to push to the back of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects pushed to the back\n of the scene.\n \"\"\"\n self.remove(*mobjects)\n self.mobjects = list(mobjects) + self.mobjects\n return self\n\n def clear(self):\n \"\"\"\n Removes all mobjects present in self.mobjects\n and self.foreground_mobjects from the scene.\n\n Returns\n ------\n Scene\n The Scene, with all of its mobjects in\n self.mobjects and self.foreground_mobjects\n removed.\n \"\"\"\n self.mobjects = []\n self.foreground_mobjects = []\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n Gets all moving mobjects in the passed animation(s).\n\n Parameters\n ----------\n *animations : Animation\n The animations to check for moving mobjects.\n\n Returns\n ------\n list\n The list of mobjects that could be moving in\n the Animation(s)\n \"\"\"\n # Go through mobjects from start to end, and\n # as soon as there's one that needs updating of\n # some kind per frame, return the list from that\n # point forward.\n animation_mobjects = [anim.mobject for anim in animations]\n mobjects = self.get_mobject_family_members()\n for i, mob in enumerate(mobjects):\n update_possibilities = [\n mob in animation_mobjects,\n len(mob.get_family_updaters()) > 0,\n mob in self.foreground_mobjects,\n ]\n if any(update_possibilities):\n return mobjects[i:]\n return []\n\n def get_moving_and_static_mobjects(self, animations):\n all_mobjects = list_update(self.mobjects, self.foreground_mobjects)\n all_mobject_families = extract_mobject_family_members(\n all_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n only_those_with_points=True,\n )\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_moving_mobject_families = extract_mobject_family_members(\n moving_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n static_mobjects = list_difference_update(\n all_mobject_families, all_moving_mobject_families\n )\n return all_moving_mobject_families, static_mobjects\n\n def compile_animations(self, *args, **kwargs):\n \"\"\"\n Creates _MethodAnimations from any _AnimationBuilders and updates animation\n kwargs with kwargs passed to play().\n Parameters\n ----------\n *animations : Tuple[:class:`Animation`]\n Animations to be played.\n **play_kwargs\n Configuration for the call to play().\n Returns\n -------\n Tuple[:class:`Animation`]\n Animations to be played.\n \"\"\"\n animations = []\n for arg in args:\n if isinstance(arg, _AnimationBuilder):\n animations.append(arg.build())\n elif isinstance(arg, Animation):\n animations.append(arg)\n elif inspect.ismethod(arg):\n raise TypeError(\n \"Passing Mobject methods to Scene.play is no longer supported. 
Use \"\n \"Mobject.animate instead.\"\n )\n else:\n raise TypeError(f\"Unexpected argument {arg} passed to Scene.play().\")\n\n for animation in animations:\n for k, v in kwargs.items():\n setattr(animation, k, v)\n\n return animations\n\n def _get_animation_time_progression(self, animations, duration):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Uses :func:`~.get_time_progression` to obtain a\n CommandLine ProgressBar whose ``fill_time`` is\n dependent on the qualities of the passed Animation,\n\n Parameters\n ----------\n animations : List[:class:`~.Animation`, ...]\n The list of animations to get\n the time progression for.\n\n duration : int or float\n duration of wait time\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if len(animations) == 1 and isinstance(animations[0], Wait):\n stop_condition = animations[0].stop_condition\n if stop_condition is not None:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting for {stop_condition.__name__}\",\n n_iterations=-1, # So it doesn't show % progress\n override_skip_animations=True,\n )\n else:\n time_progression = self.get_time_progression(\n duration, f\"Waiting {self.renderer.num_plays}\"\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n \"\".join(\n [\n f\"Animation {self.renderer.num_plays}: \",\n str(animations[0]),\n (\", etc.\" if len(animations) > 1 else \"\"),\n ]\n ),\n )\n return time_progression\n\n def get_time_progression(\n self, run_time, description, n_iterations=None, override_skip_animations=False\n ):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Returns a CommandLine ProgressBar whose ``fill_time``\n is dependent on the ``run_time`` of an animation,\n the iterations to perform in that animation\n and a bool saying whether or not to consider\n the skipped animations.\n\n Parameters\n ----------\n run_time : float\n The ``run_time`` of the animation.\n\n n_iterations : int, optional\n The number of iterations in the animation.\n\n override_skip_animations : bool, optional\n Whether or not to show skipped animations in the progress bar.\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if self.renderer.skip_animations and not override_skip_animations:\n times = [run_time]\n else:\n step = 1 / self.renderer.camera.frame_rate\n times = np.arange(0, run_time, step)\n time_progression = tqdm(\n times,\n total=n_iterations,\n leave=config[\"leave_progress_bars\"],\n ascii=True if platform.system() == \"Windows\" else None,\n disable=not config[\"progress_bar\"],\n )\n return time_progression\n\n def get_run_time(self, animations):\n \"\"\"\n Gets the total run time for a list of animations.\n\n Parameters\n ----------\n animations : List[:class:`Animation`, ...]\n A list of the animations whose total\n ``run_time`` is to be calculated.\n\n Returns\n -------\n float\n The total ``run_time`` of all of the animations in the list.\n \"\"\"\n\n if len(animations) == 1 and isinstance(animations[0], Wait):\n if animations[0].stop_condition is not None:\n return 0\n else:\n return animations[0].duration\n\n else:\n return np.max([animation.run_time for animation in animations])\n\n def play(self, *args, **kwargs):\n self.renderer.play(self, *args, **kwargs)\n\n def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):\n self.play(Wait(duration=duration, 
stop_condition=stop_condition))\n\n def wait_until(self, stop_condition, max_time=60):\n \"\"\"\n Like a wrapper for wait().\n You pass a function that determines whether to continue waiting,\n and a max wait time if that is never fulfilled.\n\n Parameters\n ----------\n stop_condition : function\n The function whose boolean return value determines whether to continue waiting\n\n max_time : int or float, optional\n The maximum wait time in seconds, if the stop_condition is never fulfilled.\n \"\"\"\n self.wait(max_time, stop_condition=stop_condition)\n\n def compile_animation_data(self, *animations, skip_rendering=False, **play_kwargs):\n if len(animations) == 0:\n warnings.warn(\"Called Scene.play with no animations\")\n return None\n\n self.animations = self.compile_animations(*animations, **play_kwargs)\n self.add_mobjects_from_animations(self.animations)\n\n self.last_t = 0\n self.stop_condition = None\n self.moving_mobjects = None\n self.static_mobjects = None\n if len(self.animations) == 1 and isinstance(self.animations[0], Wait):\n self.update_mobjects(dt=0) # Any problems with this?\n if self.should_update_mobjects():\n # TODO, be smart about setting a static image\n # the same way Scene.play does\n self.renderer.static_image = None\n self.stop_condition = self.animations[0].stop_condition\n else:\n self.duration = self.animations[0].duration\n if not skip_rendering:\n self.add_static_frames(self.animations[0].duration)\n return None\n else:\n # Paint all non-moving objects onto the screen, so they don't\n # have to be rendered every frame\n (\n self.moving_mobjects,\n self.static_mobjects,\n ) = self.get_moving_and_static_mobjects(self.animations)\n self.renderer.save_static_frame_data(self, self.static_mobjects)\n\n self.duration = self.get_run_time(self.animations)\n self.time_progression = self._get_animation_time_progression(\n self.animations, self.duration\n )\n\n for animation in self.animations:\n animation.begin()\n\n return self\n\n def play_internal(self, skip_rendering=False):\n \"\"\"\n This method is used to prep the animations for rendering,\n apply the arguments and parameters required to them,\n render them, and write them to the video file.\n\n Parameters\n ----------\n args\n Animation or mobject with mobject method and params\n kwargs\n named parameters affecting what was passed in ``args``,\n e.g. 
``run_time``, ``lag_ratio`` and so on.\n \"\"\"\n for t in self.time_progression:\n self.update_to_time(t)\n if not skip_rendering:\n self.renderer.render(self, self.moving_mobjects)\n if self.stop_condition is not None and self.stop_condition():\n self.time_progression.close()\n break\n\n for animation in self.animations:\n animation.finish()\n animation.clean_up_from_scene(self)\n self.renderer.static_image = None\n\n def update_to_time(self, t):\n dt = t - self.last_t\n self.last_t = t\n for animation in self.animations:\n animation.update_mobjects(dt)\n alpha = t / animation.run_time\n animation.interpolate(alpha)\n self.update_mobjects(dt)\n\n def add_static_frames(self, duration):\n self.renderer.update_frame(self)\n dt = 1 / self.renderer.camera.frame_rate\n self.renderer.add_frame(\n self.renderer.get_frame(),\n num_frames=int(duration / dt),\n )\n\n def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):\n \"\"\"\n This method is used to add a sound to the animation.\n\n Parameters\n ----------\n sound_file : str\n The path to the sound file.\n\n time_offset : int,float, optional\n The offset in the sound file after which\n the sound can be played.\n\n gain :\n\n \"\"\"\n if self.renderer.skip_animations:\n return\n time = self.time + time_offset\n self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)\n", "path": "manim/scene/scene.py" } ]
[ { "content": "\"\"\"Basic canvas for animations.\"\"\"\n\n\n__all__ = [\"Scene\"]\n\n\nimport inspect\nimport random\nimport warnings\nimport platform\nimport copy\nimport string\nimport types\n\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom .. import config, logger\nfrom ..animation.animation import Animation, Wait\nfrom ..animation.transform import MoveToTarget, _MethodAnimation\nfrom ..camera.camera import Camera\nfrom ..constants import *\nfrom ..container import Container\nfrom ..mobject.mobject import Mobject, _AnimationBuilder\nfrom ..utils.iterables import list_update, list_difference_update\nfrom ..utils.family import extract_mobject_family_members\nfrom ..renderer.cairo_renderer import CairoRenderer\nfrom ..utils.exceptions import EndSceneEarlyException\n\n\nclass Scene(Container):\n \"\"\"A Scene is the canvas of your animation.\n\n The primary role of :class:`Scene` is to provide the user with tools to manage\n mobjects and animations. Generally speaking, a manim script consists of a class\n that derives from :class:`Scene` whose :meth:`Scene.construct` method is overridden\n by the user's code.\n\n Mobjects are displayed on screen by calling :meth:`Scene.add` and removed from\n screen by calling :meth:`Scene.remove`. All mobjects currently on screen are kept\n in :attr:`Scene.mobjects`. Animations are played by calling :meth:`Scene.play`.\n\n A :class:`Scene` is rendered internally by calling :meth:`Scene.render`. This in\n turn calls :meth:`Scene.setup`, :meth:`Scene.construct`, and\n :meth:`Scene.tear_down`, in that order.\n\n It is not recommended to override the ``__init__`` method in user Scenes. For code\n that should be ran before a Scene is rendered, use :meth:`Scene.setup` instead.\n\n\n Examples\n --------\n Override the :meth:`Scene.construct` method with your code.\n\n .. code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n \"\"\"\n\n def __init__(\n self,\n renderer=None,\n camera_class=Camera,\n always_update_mobjects=False,\n random_seed=0,\n **kwargs,\n ):\n self.camera_class = camera_class\n self.always_update_mobjects = always_update_mobjects\n self.random_seed = random_seed\n\n self.animations = None\n self.stop_condition = None\n self.moving_mobjects = None\n self.static_mobjects = None\n self.time_progression = None\n self.duration = None\n self.last_t = None\n\n if renderer is None:\n self.renderer = CairoRenderer(\n camera_class=self.camera_class,\n skip_animations=kwargs.get(\"skip_animations\", False),\n )\n else:\n self.renderer = renderer\n self.renderer.init_scene(self)\n\n self.mobjects = []\n # TODO, remove need for foreground mobjects\n self.foreground_mobjects = []\n if self.random_seed is not None:\n random.seed(self.random_seed)\n np.random.seed(self.random_seed)\n\n Container.__init__(self, **kwargs)\n\n @property\n def camera(self):\n return self.renderer.camera\n\n def __deepcopy__(self, clone_from_id):\n cls = self.__class__\n result = cls.__new__(cls)\n clone_from_id[id(self)] = result\n for k, v in self.__dict__.items():\n if k in [\"renderer\", \"time_progression\"]:\n continue\n if k == \"camera_class\":\n setattr(result, k, v)\n setattr(result, k, copy.deepcopy(v, clone_from_id))\n\n # Update updaters\n for mobject in self.mobjects:\n cloned_updaters = []\n for updater in mobject.updaters:\n # Make the cloned updater use the cloned Mobjects as free variables\n # rather than the original ones. 
Analyzing function bytecode with the\n # dis module will help in understanding this.\n # https://docs.python.org/3/library/dis.html\n # TODO: Do the same for function calls recursively.\n free_variable_map = inspect.getclosurevars(updater).nonlocals\n cloned_co_freevars = []\n cloned_closure = []\n for i, free_variable_name in enumerate(updater.__code__.co_freevars):\n free_variable_value = free_variable_map[free_variable_name]\n\n # If the referenced variable has not been cloned, raise.\n if id(free_variable_value) not in clone_from_id:\n raise Exception(\n f\"{free_variable_name} is referenced from an updater \"\n \"but is not an attribute of the Scene, which isn't \"\n \"allowed.\"\n )\n\n # Add the cloned object's name to the free variable list.\n cloned_co_freevars.append(free_variable_name)\n\n # Add a cell containing the cloned object's reference to the\n # closure list.\n cloned_closure.append(\n types.CellType(clone_from_id[id(free_variable_value)])\n )\n\n cloned_updater = types.FunctionType(\n updater.__code__.replace(co_freevars=tuple(cloned_co_freevars)),\n updater.__globals__,\n updater.__name__,\n updater.__defaults__,\n tuple(cloned_closure),\n )\n cloned_updaters.append(cloned_updater)\n clone_from_id[id(mobject)].updaters = cloned_updaters\n return result\n\n def render(self):\n \"\"\"\n Render this Scene.\n \"\"\"\n self.setup()\n try:\n self.construct()\n except EndSceneEarlyException:\n pass\n self.tear_down()\n # We have to reset these settings in case of multiple renders.\n self.renderer.scene_finished(self)\n logger.info(\n f\"Rendered {str(self)}\\nPlayed {self.renderer.num_plays} animations\"\n )\n\n def setup(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common setup\n involved before the construct method is called.\n \"\"\"\n pass\n\n def tear_down(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common method\n to be invoked before the scene ends.\n \"\"\"\n pass\n\n def construct(self):\n \"\"\"Add content to the Scene.\n\n From within :meth:`Scene.construct`, display mobjects on screen by calling\n :meth:`Scene.add` and remove them from screen by calling :meth:`Scene.remove`.\n All mobjects currently on screen are kept in :attr:`Scene.mobjects`. Play\n animations by calling :meth:`Scene.play`.\n\n Notes\n -----\n Initialization code should go in :meth:`Scene.setup`. Termination code should\n go in :meth:`Scene.tear_down`.\n\n Examples\n --------\n A typical manim script includes a class derived from :class:`Scene` with an\n overridden :meth:`Scene.contruct` method:\n\n .. code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n See Also\n --------\n :meth:`Scene.setup`\n :meth:`Scene.render`\n :meth:`Scene.tear_down`\n\n \"\"\"\n pass # To be implemented in subclasses\n\n def __str__(self):\n return self.__class__.__name__\n\n def get_attrs(self, *keys):\n \"\"\"\n Gets attributes of a scene given the attribute's identifier/name.\n\n Parameters\n ----------\n *keys : str\n Name(s) of the argument(s) to return the attribute of.\n\n Returns\n -------\n list\n List of attributes of the passed identifiers.\n \"\"\"\n return [getattr(self, key) for key in keys]\n\n def update_mobjects(self, dt):\n \"\"\"\n Begins updating all mobjects in the Scene.\n\n Parameters\n ----------\n dt: int or float\n Change in time between updates. 
Defaults (mostly) to 1/frames_per_second\n \"\"\"\n for mobject in self.mobjects:\n mobject.update(dt)\n\n def should_update_mobjects(self):\n \"\"\"\n Returns True if any mobject in Scene is being updated\n or if the scene has always_update_mobjects set to true.\n\n Returns\n -------\n bool\n \"\"\"\n return self.always_update_mobjects or any(\n [mob.has_time_based_updater() for mob in self.get_mobject_family_members()]\n )\n\n def get_top_level_mobjects(self):\n \"\"\"\n Returns all mobjects which are not submobjects.\n\n Returns\n -------\n list\n List of top level mobjects.\n \"\"\"\n # Return only those which are not in the family\n # of another mobject from the scene\n families = [m.get_family() for m in self.mobjects]\n\n def is_top_level(mobject):\n num_families = sum([(mobject in family) for family in families])\n return num_families == 1\n\n return list(filter(is_top_level, self.mobjects))\n\n def get_mobject_family_members(self):\n \"\"\"\n Returns list of family-members of all mobjects in scene.\n If a Circle() and a VGroup(Rectangle(),Triangle()) were added,\n it returns not only the Circle(), Rectangle() and Triangle(), but\n also the VGroup() object.\n\n Returns\n -------\n list\n List of mobject family members.\n \"\"\"\n return extract_mobject_family_members(\n self.mobjects, use_z_index=self.renderer.camera.use_z_index\n )\n\n def add(self, *mobjects):\n \"\"\"\n Mobjects will be displayed, from background to\n foreground in the order with which they are added.\n\n Parameters\n ---------\n *mobjects : Mobject\n Mobjects to add.\n\n Returns\n -------\n Scene\n The same scene after adding the Mobjects in.\n\n \"\"\"\n mobjects = [*mobjects, *self.foreground_mobjects]\n self.restructure_mobjects(to_remove=mobjects)\n self.mobjects += mobjects\n if self.moving_mobjects:\n self.restructure_mobjects(\n to_remove=mobjects, mobject_list_name=\"moving_mobjects\"\n )\n self.moving_mobjects += mobjects\n return self\n\n def add_mobjects_from_animations(self, animations):\n\n curr_mobjects = self.get_mobject_family_members()\n for animation in animations:\n # Anything animated that's not already in the\n # scene gets added to the scene\n mob = animation.mobject\n if mob is not None and mob not in curr_mobjects:\n self.add(mob)\n curr_mobjects += mob.get_family()\n\n def remove(self, *mobjects):\n \"\"\"\n Removes mobjects in the passed list of mobjects\n from the scene and the foreground, by removing them\n from \"mobjects\" and \"foreground_mobjects\"\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobjects to remove.\n \"\"\"\n for list_name in \"mobjects\", \"foreground_mobjects\":\n self.restructure_mobjects(mobjects, list_name, False)\n return self\n\n def restructure_mobjects(\n self, to_remove, mobject_list_name=\"mobjects\", extract_families=True\n ):\n \"\"\"\n tl:wr\n If your scene has a Group(), and you removed a mobject from the Group,\n this dissolves the group and puts the rest of the mobjects directly\n in self.mobjects or self.foreground_mobjects.\n\n In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one\n of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects\n will be edited to contain other submobjects, but not m1, e.g. 
it will now\n insert m2 and m3 to where the group once was.\n\n Parameters\n ----------\n to_remove : Mobject\n The Mobject to remove.\n\n mobject_list_name : str, optional\n The list of mobjects (\"mobjects\", \"foreground_mobjects\" etc) to remove from.\n\n extract_families : bool, optional\n Whether the mobject's families should be recursively extracted.\n\n Returns\n -------\n Scene\n The Scene mobject with restructured Mobjects.\n \"\"\"\n if extract_families:\n to_remove = extract_mobject_family_members(\n to_remove, use_z_index=self.renderer.camera.use_z_index\n )\n _list = getattr(self, mobject_list_name)\n new_list = self.get_restructured_mobject_list(_list, to_remove)\n setattr(self, mobject_list_name, new_list)\n return self\n\n def get_restructured_mobject_list(self, mobjects, to_remove):\n \"\"\"\n Given a list of mobjects and a list of mobjects to be removed, this\n filters out the removable mobjects from the list of mobjects.\n\n Parameters\n ----------\n\n mobjects : list\n The Mobjects to check.\n\n to_remove : list\n The list of mobjects to remove.\n\n Returns\n -------\n list\n The list of mobjects with the mobjects to remove removed.\n \"\"\"\n\n new_mobjects = []\n\n def add_safe_mobjects_from_list(list_to_examine, set_to_remove):\n for mob in list_to_examine:\n if mob in set_to_remove:\n continue\n intersect = set_to_remove.intersection(mob.get_family())\n if intersect:\n add_safe_mobjects_from_list(mob.submobjects, intersect)\n else:\n new_mobjects.append(mob)\n\n add_safe_mobjects_from_list(mobjects, set(to_remove))\n return new_mobjects\n\n # TODO, remove this, and calls to this\n def add_foreground_mobjects(self, *mobjects):\n \"\"\"\n Adds mobjects to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n *mobjects : Mobject\n The Mobjects to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects added.\n \"\"\"\n self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)\n self.add(*mobjects)\n return self\n\n def add_foreground_mobject(self, mobject):\n \"\"\"\n Adds a single mobject to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The Mobject to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject added.\n \"\"\"\n return self.add_foreground_mobjects(mobject)\n\n def remove_foreground_mobjects(self, *to_remove):\n \"\"\"\n Removes mobjects from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n *to_remove : Mobject\n The mobject(s) to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects removed.\n \"\"\"\n self.restructure_mobjects(to_remove, \"foreground_mobjects\")\n return self\n\n def remove_foreground_mobject(self, mobject):\n \"\"\"\n Removes a single mobject from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The mobject to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject removed.\n \"\"\"\n return self.remove_foreground_mobjects(mobject)\n\n def bring_to_front(self, *mobjects):\n \"\"\"\n Adds the passed mobjects to the scene again,\n pushing them to he front of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to bring to the front of the scene.\n\n Returns\n ------\n Scene\n The 
Scene, with the mobjects brought to the front\n of the scene.\n \"\"\"\n self.add(*mobjects)\n return self\n\n def bring_to_back(self, *mobjects):\n \"\"\"\n Removes the mobject from the scene and\n adds them to the back of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to push to the back of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects pushed to the back\n of the scene.\n \"\"\"\n self.remove(*mobjects)\n self.mobjects = list(mobjects) + self.mobjects\n return self\n\n def clear(self):\n \"\"\"\n Removes all mobjects present in self.mobjects\n and self.foreground_mobjects from the scene.\n\n Returns\n ------\n Scene\n The Scene, with all of its mobjects in\n self.mobjects and self.foreground_mobjects\n removed.\n \"\"\"\n self.mobjects = []\n self.foreground_mobjects = []\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n Gets all moving mobjects in the passed animation(s).\n\n Parameters\n ----------\n *animations : Animation\n The animations to check for moving mobjects.\n\n Returns\n ------\n list\n The list of mobjects that could be moving in\n the Animation(s)\n \"\"\"\n # Go through mobjects from start to end, and\n # as soon as there's one that needs updating of\n # some kind per frame, return the list from that\n # point forward.\n animation_mobjects = [anim.mobject for anim in animations]\n mobjects = self.get_mobject_family_members()\n for i, mob in enumerate(mobjects):\n update_possibilities = [\n mob in animation_mobjects,\n len(mob.get_family_updaters()) > 0,\n mob in self.foreground_mobjects,\n ]\n if any(update_possibilities):\n return mobjects[i:]\n return []\n\n def get_moving_and_static_mobjects(self, animations):\n all_mobjects = list_update(self.mobjects, self.foreground_mobjects)\n all_mobject_families = extract_mobject_family_members(\n all_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n only_those_with_points=True,\n )\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_moving_mobject_families = extract_mobject_family_members(\n moving_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n static_mobjects = list_difference_update(\n all_mobject_families, all_moving_mobject_families\n )\n return all_moving_mobject_families, static_mobjects\n\n def compile_animations(self, *args, **kwargs):\n \"\"\"\n Creates _MethodAnimations from any _AnimationBuilders and updates animation\n kwargs with kwargs passed to play().\n Parameters\n ----------\n *animations : Tuple[:class:`Animation`]\n Animations to be played.\n **play_kwargs\n Configuration for the call to play().\n Returns\n -------\n Tuple[:class:`Animation`]\n Animations to be played.\n \"\"\"\n animations = []\n for arg in args:\n if isinstance(arg, _AnimationBuilder):\n animations.append(arg.build())\n elif isinstance(arg, Animation):\n animations.append(arg)\n elif inspect.ismethod(arg):\n raise TypeError(\n \"Passing Mobject methods to Scene.play is no longer supported. 
Use \"\n \"Mobject.animate instead.\"\n )\n else:\n raise TypeError(f\"Unexpected argument {arg} passed to Scene.play().\")\n\n for animation in animations:\n for k, v in kwargs.items():\n setattr(animation, k, v)\n\n return animations\n\n def _get_animation_time_progression(self, animations, duration):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Uses :func:`~.get_time_progression` to obtain a\n CommandLine ProgressBar whose ``fill_time`` is\n dependent on the qualities of the passed Animation,\n\n Parameters\n ----------\n animations : List[:class:`~.Animation`, ...]\n The list of animations to get\n the time progression for.\n\n duration : int or float\n duration of wait time\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if len(animations) == 1 and isinstance(animations[0], Wait):\n stop_condition = animations[0].stop_condition\n if stop_condition is not None:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting for {stop_condition.__name__}\",\n n_iterations=-1, # So it doesn't show % progress\n override_skip_animations=True,\n )\n else:\n time_progression = self.get_time_progression(\n duration, f\"Waiting {self.renderer.num_plays}\"\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n \"\".join(\n [\n f\"Animation {self.renderer.num_plays}: \",\n str(animations[0]),\n (\", etc.\" if len(animations) > 1 else \"\"),\n ]\n ),\n )\n return time_progression\n\n def get_time_progression(\n self, run_time, description, n_iterations=None, override_skip_animations=False\n ):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Returns a CommandLine ProgressBar whose ``fill_time``\n is dependent on the ``run_time`` of an animation,\n the iterations to perform in that animation\n and a bool saying whether or not to consider\n the skipped animations.\n\n Parameters\n ----------\n run_time : float\n The ``run_time`` of the animation.\n\n n_iterations : int, optional\n The number of iterations in the animation.\n\n override_skip_animations : bool, optional\n Whether or not to show skipped animations in the progress bar.\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if self.renderer.skip_animations and not override_skip_animations:\n times = [run_time]\n else:\n step = 1 / self.renderer.camera.frame_rate\n times = np.arange(0, run_time, step)\n time_progression = tqdm(\n times,\n total=n_iterations,\n leave=config[\"leave_progress_bars\"],\n ascii=True if platform.system() == \"Windows\" else None,\n disable=not config[\"progress_bar\"],\n )\n return time_progression\n\n def get_run_time(self, animations):\n \"\"\"\n Gets the total run time for a list of animations.\n\n Parameters\n ----------\n animations : List[:class:`Animation`, ...]\n A list of the animations whose total\n ``run_time`` is to be calculated.\n\n Returns\n -------\n float\n The total ``run_time`` of all of the animations in the list.\n \"\"\"\n\n if len(animations) == 1 and isinstance(animations[0], Wait):\n if animations[0].stop_condition is not None:\n return 0\n else:\n return animations[0].duration\n\n else:\n return np.max([animation.run_time for animation in animations])\n\n def play(self, *args, **kwargs):\n self.renderer.play(self, *args, **kwargs)\n\n def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):\n self.play(Wait(duration=duration, 
stop_condition=stop_condition))\n\n def wait_until(self, stop_condition, max_time=60):\n \"\"\"\n Like a wrapper for wait().\n You pass a function that determines whether to continue waiting,\n and a max wait time if that is never fulfilled.\n\n Parameters\n ----------\n stop_condition : function\n The function whose boolean return value determines whether to continue waiting\n\n max_time : int or float, optional\n The maximum wait time in seconds, if the stop_condition is never fulfilled.\n \"\"\"\n self.wait(max_time, stop_condition=stop_condition)\n\n def compile_animation_data(self, *animations, skip_rendering=False, **play_kwargs):\n if len(animations) == 0:\n warnings.warn(\"Called Scene.play with no animations\")\n return None\n\n self.animations = self.compile_animations(*animations, **play_kwargs)\n self.add_mobjects_from_animations(self.animations)\n\n self.last_t = 0\n self.stop_condition = None\n self.moving_mobjects = None\n self.static_mobjects = None\n if len(self.animations) == 1 and isinstance(self.animations[0], Wait):\n self.update_mobjects(dt=0) # Any problems with this?\n if self.should_update_mobjects():\n # TODO, be smart about setting a static image\n # the same way Scene.play does\n self.renderer.static_image = None\n self.stop_condition = self.animations[0].stop_condition\n else:\n self.duration = self.animations[0].duration\n if not skip_rendering:\n self.add_static_frames(self.animations[0].duration)\n return None\n else:\n # Paint all non-moving objects onto the screen, so they don't\n # have to be rendered every frame\n (\n self.moving_mobjects,\n self.static_mobjects,\n ) = self.get_moving_and_static_mobjects(self.animations)\n self.renderer.save_static_frame_data(self, self.static_mobjects)\n\n self.duration = self.get_run_time(self.animations)\n self.time_progression = self._get_animation_time_progression(\n self.animations, self.duration\n )\n\n for animation in self.animations:\n animation.begin()\n\n return self\n\n def play_internal(self, skip_rendering=False):\n \"\"\"\n This method is used to prep the animations for rendering,\n apply the arguments and parameters required to them,\n render them, and write them to the video file.\n\n Parameters\n ----------\n args\n Animation or mobject with mobject method and params\n kwargs\n named parameters affecting what was passed in ``args``,\n e.g. 
``run_time``, ``lag_ratio`` and so on.\n \"\"\"\n for t in self.time_progression:\n self.update_to_time(t)\n if not skip_rendering:\n self.renderer.render(self, self.moving_mobjects)\n if self.stop_condition is not None and self.stop_condition():\n self.time_progression.close()\n break\n\n for animation in self.animations:\n animation.finish()\n animation.clean_up_from_scene(self)\n self.renderer.static_image = None\n\n def update_to_time(self, t):\n dt = t - self.last_t\n self.last_t = t\n for animation in self.animations:\n animation.update_mobjects(dt)\n alpha = t / animation.run_time\n animation.interpolate(alpha)\n self.update_mobjects(dt)\n\n def add_static_frames(self, duration):\n self.renderer.update_frame(self)\n dt = 1 / self.renderer.camera.frame_rate\n self.renderer.add_frame(\n self.renderer.get_frame(),\n num_frames=int(duration / dt),\n )\n\n def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):\n \"\"\"\n This method is used to add a sound to the animation.\n\n Parameters\n ----------\n sound_file : str\n The path to the sound file.\n\n time_offset : int,float, optional\n The offset in the sound file after which\n the sound can be played.\n\n gain :\n\n \"\"\"\n if self.renderer.skip_animations:\n return\n time = self.renderer.time + time_offset\n self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)\n", "path": "manim/scene/scene.py" } ]
diff --git a/manim/scene/scene.py b/manim/scene/scene.py index 9110b54fd1..11e42c8de4 100644 --- a/manim/scene/scene.py +++ b/manim/scene/scene.py @@ -880,5 +880,5 @@ def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs): """ if self.renderer.skip_animations: return - time = self.time + time_offset + time = self.renderer.time + time_offset self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs) diff --git a/tests/test_sound.py b/tests/test_sound.py new file mode 100644 index 0000000000..d7614eca65 --- /dev/null +++ b/tests/test_sound.py @@ -0,0 +1,20 @@ +import os, struct, wave + +from manim import Scene + + +def test_add_sound(): + # create sound file + f = wave.open("noise.wav", "w") + f.setparams((2, 2, 44100, 0, "NONE", "not compressed")) + for _ in range(22050): # half a second of sound + packed_value = struct.pack("h", 14242) + f.writeframes(packed_value) + f.writeframes(packed_value) + + f.close() + + scene = Scene() + scene.add_sound("noise.wav") + + os.remove("noise.wav")
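The one-line patch reads the elapsed time from the renderer instead of the scene. Below is a manim-free sketch of the relationship the fix relies on; the attribute names mirror the patched code, but the renderer and file writer are stubs standing in for manim's real objects (whose internals are not shown here), and the numeric values are made up:

```python
# Sketch of the relationship behind the fix: the renderer owns the elapsed-time
# counter and the file writer, so the scene asks the renderer for the current
# time when scheduling a sound.

class StubFileWriter:
    def __init__(self):
        self.sounds = []

    def add_sound(self, sound_file, time, gain=None, **kwargs):
        self.sounds.append((sound_file, time, gain))


class StubRenderer:
    def __init__(self):
        self.time = 2.5                  # elapsed time tracked by the renderer
        self.skip_animations = False
        self.file_writer = StubFileWriter()


class SceneLike:
    def __init__(self):
        self.renderer = StubRenderer()

    def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
        if self.renderer.skip_animations:
            return
        time = self.renderer.time + time_offset     # the patched line
        self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)


scene = SceneLike()
scene.add_sound("noise.wav", time_offset=0.5)
print(scene.renderer.file_writer.sounds)            # [('noise.wav', 3.0, None)]
```

Reading the clock from the object that also owns the file writer keeps the scheduled offset consistent with what gets written, which appears to be the point of the one-line change.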
benoitc__gunicorn-1654
Access log not emitted when using `logconfig_dict` Using the unreleased version from `master`, HTTP requests do not create log records in the logger `gunicorn.access` when using the new `logconfig_dict`. See relevant snippet from `glogging.py`: ```python def access(self, resp, req, environ, request_time): """ See http://httpd.apache.org/docs/2.0/logs.html#combined for format details """ if not (self.cfg.accesslog or self.cfg.logconfig or (self.cfg.syslog and not self.cfg.disable_access_log_redirection)): return ```
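For context, `logconfig_dict` is ordinary `logging.config.dictConfig` input. The sketch below is an illustrative configuration, not gunicorn's defaults (note the `ext://sys.stdout` form that stock `dictConfig` expects), wiring a handler onto the `gunicorn.access` logger; with the guard quoted above, a request never reaches `access_log.info(...)` when only `logconfig_dict` is set, so a handler configured this way receives nothing:

```python
# Illustrative logging config of the kind passed via `logconfig_dict`; this is
# standard `logging.config.dictConfig` input, not gunicorn's exact defaults.
import logging
import logging.config

LOGCONFIG_DICT = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "access": {"format": "%(message)s"},
    },
    "handlers": {
        "access_console": {
            "class": "logging.StreamHandler",
            "formatter": "access",
            "stream": "ext://sys.stdout",   # dictConfig needs the ext:// form
        },
    },
    "loggers": {
        "gunicorn.access": {
            "level": "INFO",
            "handlers": ["access_console"],
            "propagate": False,
        },
    },
}

logging.config.dictConfig(LOGCONFIG_DICT)

# The handler is wired up, but with the pre-fix guard gunicorn never calls
# `access_log.info(...)` when only `logconfig_dict` is set, so nothing is
# printed for real requests.  Emitting a record manually shows the config
# itself works:
logging.getLogger("gunicorn.access").info('127.0.0.1 - - "GET / HTTP/1.1" 200')
```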
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport base64\nimport binascii\nimport time\nimport logging\nlogging.Logger.manager.emittedNoHandlerWarning = 1\nfrom logging.config import fileConfig\ntry:\n from logging.config import dictConfig\nexcept ImportError:\n # python 2.6\n dictConfig = None\nimport os\nimport socket\nimport sys\nimport threading\nimport traceback\n\nfrom gunicorn import util\nfrom gunicorn.six import PY3, string_types\n\n\n# syslog facility codes\nSYSLOG_FACILITIES = {\n \"auth\": 4,\n \"authpriv\": 10,\n \"cron\": 9,\n \"daemon\": 3,\n \"ftp\": 11,\n \"kern\": 0,\n \"lpr\": 6,\n \"mail\": 2,\n \"news\": 7,\n \"security\": 4, # DEPRECATED\n \"syslog\": 5,\n \"user\": 1,\n \"uucp\": 8,\n \"local0\": 16,\n \"local1\": 17,\n \"local2\": 18,\n \"local3\": 19,\n \"local4\": 20,\n \"local5\": 21,\n \"local6\": 22,\n \"local7\": 23\n }\n\n\nCONFIG_DEFAULTS = dict(\n version=1,\n disable_existing_loggers=False,\n\n loggers={\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"gunicorn.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"gunicorn.error\"\n },\n\n \"gunicorn.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"qualname\": \"gunicorn.access\"\n }\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"sys.stdout\"\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"sys.stderr\"\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\"\n }\n }\n)\n\n\ndef loggers():\n \"\"\" get list of all loggers \"\"\"\n root = logging.root\n existing = root.manager.loggerDict.keys()\n return [logging.getLogger(name) for name in existing]\n\n\nclass SafeAtoms(dict):\n\n def __init__(self, atoms):\n dict.__init__(self)\n for key, value in atoms.items():\n if isinstance(value, string_types):\n self[key] = value.replace('\"', '\\\\\"')\n else:\n self[key] = value\n\n def __getitem__(self, k):\n if k.startswith(\"{\"):\n kl = k.lower()\n if kl in self:\n return super(SafeAtoms, self).__getitem__(kl)\n else:\n return \"-\"\n if k in self:\n return super(SafeAtoms, self).__getitem__(k)\n else:\n return '-'\n\n\ndef parse_syslog_address(addr):\n\n # unix domain socket type depends on backend\n # SysLogHandler will try both when given None\n if addr.startswith(\"unix://\"):\n sock_type = None\n\n # set socket type only if explicitly requested\n parts = addr.split(\"#\", 1)\n if len(parts) == 2:\n addr = parts[0]\n if parts[1] == \"dgram\":\n sock_type = socket.SOCK_DGRAM\n\n return (sock_type, addr.split(\"unix://\")[1])\n\n if addr.startswith(\"udp://\"):\n addr = addr.split(\"udp://\")[1]\n socktype = socket.SOCK_DGRAM\n elif addr.startswith(\"tcp://\"):\n addr = addr.split(\"tcp://\")[1]\n socktype = socket.SOCK_STREAM\n else:\n raise RuntimeError(\"invalid syslog address\")\n\n if '[' in addr and ']' in addr:\n host = addr.split(']')[0][1:].lower()\n elif ':' in addr:\n host = addr.split(':')[0].lower()\n elif addr == \"\":\n host = \"localhost\"\n else:\n host = addr.lower()\n\n addr = addr.split(']')[-1]\n if \":\" in addr:\n port = addr.split(':', 1)[1]\n if not port.isdigit():\n raise RuntimeError(\"%r 
is not a valid port number.\" % port)\n port = int(port)\n else:\n port = 514\n\n return (socktype, (host, port))\n\n\nclass Logger(object):\n\n LOG_LEVELS = {\n \"critical\": logging.CRITICAL,\n \"error\": logging.ERROR,\n \"warning\": logging.WARNING,\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG\n }\n loglevel = logging.INFO\n\n error_fmt = r\"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\"\n datefmt = r\"[%Y-%m-%d %H:%M:%S %z]\"\n\n access_fmt = \"%(message)s\"\n syslog_fmt = \"[%(process)d] %(message)s\"\n\n atoms_wrapper_class = SafeAtoms\n\n def __init__(self, cfg):\n self.error_log = logging.getLogger(\"gunicorn.error\")\n self.error_log.propagate = False\n self.access_log = logging.getLogger(\"gunicorn.access\")\n self.access_log.propagate = False\n self.error_handlers = []\n self.access_handlers = []\n self.logfile = None\n self.lock = threading.Lock()\n self.cfg = cfg\n self.setup(cfg)\n\n def setup(self, cfg):\n self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)\n self.error_log.setLevel(self.loglevel)\n self.access_log.setLevel(logging.INFO)\n\n # set gunicorn.error handler\n if self.cfg.capture_output and cfg.errorlog != \"-\":\n for stream in sys.stdout, sys.stderr:\n stream.flush()\n\n self.logfile = open(cfg.errorlog, 'a+')\n os.dup2(self.logfile.fileno(), sys.stdout.fileno())\n os.dup2(self.logfile.fileno(), sys.stderr.fileno())\n\n self._set_handler(self.error_log, cfg.errorlog,\n logging.Formatter(self.error_fmt, self.datefmt))\n\n # set gunicorn.access handler\n if cfg.accesslog is not None:\n self._set_handler(self.access_log, cfg.accesslog,\n fmt=logging.Formatter(self.access_fmt), stream=sys.stdout)\n\n # set syslog handler\n if cfg.syslog:\n self._set_syslog_handler(\n self.error_log, cfg, self.syslog_fmt, \"error\"\n )\n if not cfg.disable_redirect_access_to_syslog:\n self._set_syslog_handler(\n self.access_log, cfg, self.syslog_fmt, \"access\"\n )\n\n if dictConfig is None and cfg.logconfig_dict:\n util.warn(\"Dictionary-based log configuration requires \"\n \"Python 2.7 or above.\")\n\n if dictConfig and cfg.logconfig_dict:\n config = CONFIG_DEFAULTS.copy()\n config.update(cfg.logconfig_dict)\n try:\n dictConfig(config)\n except (\n AttributeError,\n ImportError,\n ValueError,\n TypeError\n ) as exc:\n raise RuntimeError(str(exc))\n elif cfg.logconfig:\n if os.path.exists(cfg.logconfig):\n defaults = CONFIG_DEFAULTS.copy()\n defaults['__file__'] = cfg.logconfig\n defaults['here'] = os.path.dirname(cfg.logconfig)\n fileConfig(cfg.logconfig, defaults=defaults,\n disable_existing_loggers=False)\n else:\n msg = \"Error: log config '%s' not found\"\n raise RuntimeError(msg % cfg.logconfig)\n\n def critical(self, msg, *args, **kwargs):\n self.error_log.critical(msg, *args, **kwargs)\n\n def error(self, msg, *args, **kwargs):\n self.error_log.error(msg, *args, **kwargs)\n\n def warning(self, msg, *args, **kwargs):\n self.error_log.warning(msg, *args, **kwargs)\n\n def info(self, msg, *args, **kwargs):\n self.error_log.info(msg, *args, **kwargs)\n\n def debug(self, msg, *args, **kwargs):\n self.error_log.debug(msg, *args, **kwargs)\n\n def exception(self, msg, *args, **kwargs):\n self.error_log.exception(msg, *args, **kwargs)\n\n def log(self, lvl, msg, *args, **kwargs):\n if isinstance(lvl, string_types):\n lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)\n self.error_log.log(lvl, msg, *args, **kwargs)\n\n def atoms(self, resp, req, environ, request_time):\n \"\"\" Gets atoms for log formating.\n \"\"\"\n status = resp.status\n 
if isinstance(status, str):\n status = status.split(None, 1)[0]\n atoms = {\n 'h': environ.get('REMOTE_ADDR', '-'),\n 'l': '-',\n 'u': self._get_user(environ) or '-',\n 't': self.now(),\n 'r': \"%s %s %s\" % (environ['REQUEST_METHOD'],\n environ['RAW_URI'], environ[\"SERVER_PROTOCOL\"]),\n 's': status,\n 'm': environ.get('REQUEST_METHOD'),\n 'U': environ.get('PATH_INFO'),\n 'q': environ.get('QUERY_STRING'),\n 'H': environ.get('SERVER_PROTOCOL'),\n 'b': getattr(resp, 'sent', None) and str(resp.sent) or '-',\n 'B': getattr(resp, 'sent', None),\n 'f': environ.get('HTTP_REFERER', '-'),\n 'a': environ.get('HTTP_USER_AGENT', '-'),\n 'T': request_time.seconds,\n 'D': (request_time.seconds*1000000) + request_time.microseconds,\n 'L': \"%d.%06d\" % (request_time.seconds, request_time.microseconds),\n 'p': \"<%s>\" % os.getpid()\n }\n\n # add request headers\n if hasattr(req, 'headers'):\n req_headers = req.headers\n else:\n req_headers = req\n\n if hasattr(req_headers, \"items\"):\n req_headers = req_headers.items()\n\n atoms.update(dict([(\"{%s}i\" % k.lower(), v) for k, v in req_headers]))\n\n resp_headers = resp.headers\n if hasattr(resp_headers, \"items\"):\n resp_headers = resp_headers.items()\n\n # add response headers\n atoms.update(dict([(\"{%s}o\" % k.lower(), v) for k, v in resp_headers]))\n\n # add environ variables\n environ_variables = environ.items()\n atoms.update(dict([(\"{%s}e\" % k.lower(), v) for k, v in environ_variables]))\n\n return atoms\n\n def access(self, resp, req, environ, request_time):\n \"\"\" See http://httpd.apache.org/docs/2.0/logs.html#combined\n for format details\n \"\"\"\n\n if not (self.cfg.accesslog or self.cfg.logconfig or\n (self.cfg.syslog and not self.cfg.disable_access_log_redirection)):\n return\n\n # wrap atoms:\n # - make sure atoms will be test case insensitively\n # - if atom doesn't exist replace it by '-'\n safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ,\n request_time))\n\n try:\n self.access_log.info(self.cfg.access_log_format, safe_atoms)\n except:\n self.error(traceback.format_exc())\n\n def now(self):\n \"\"\" return date in Apache Common Log Format \"\"\"\n return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')\n\n def reopen_files(self):\n if self.cfg.capture_output and self.cfg.errorlog != \"-\":\n for stream in sys.stdout, sys.stderr:\n stream.flush()\n\n with self.lock:\n if self.logfile is not None:\n self.logfile.close()\n self.logfile = open(self.cfg.errorlog, 'a+')\n os.dup2(self.logfile.fileno(), sys.stdout.fileno())\n os.dup2(self.logfile.fileno(), sys.stderr.fileno())\n\n\n for log in loggers():\n for handler in log.handlers:\n if isinstance(handler, logging.FileHandler):\n handler.acquire()\n try:\n if handler.stream:\n handler.stream.close()\n handler.stream = open(handler.baseFilename,\n handler.mode)\n finally:\n handler.release()\n\n def close_on_exec(self):\n for log in loggers():\n for handler in log.handlers:\n if isinstance(handler, logging.FileHandler):\n handler.acquire()\n try:\n if handler.stream:\n util.close_on_exec(handler.stream.fileno())\n finally:\n handler.release()\n\n def _get_gunicorn_handler(self, log):\n for h in log.handlers:\n if getattr(h, \"_gunicorn\", False):\n return h\n\n def _set_handler(self, log, output, fmt, stream=None):\n # remove previous gunicorn log handler\n h = self._get_gunicorn_handler(log)\n if h:\n log.handlers.remove(h)\n\n if output is not None:\n if output == \"-\":\n h = logging.StreamHandler(stream)\n else:\n util.check_is_writeable(output)\n h = 
logging.FileHandler(output)\n # make sure the user can reopen the file\n try:\n os.chown(h.baseFilename, self.cfg.user, self.cfg.group)\n except OSError:\n # it's probably OK there, we assume the user has given\n # /dev/null as a parameter.\n pass\n\n h.setFormatter(fmt)\n h._gunicorn = True\n log.addHandler(h)\n\n def _set_syslog_handler(self, log, cfg, fmt, name):\n # setup format\n if not cfg.syslog_prefix:\n prefix = cfg.proc_name.replace(\":\", \".\")\n else:\n prefix = cfg.syslog_prefix\n\n prefix = \"gunicorn.%s.%s\" % (prefix, name)\n\n # set format\n fmt = logging.Formatter(r\"%s: %s\" % (prefix, fmt))\n\n # syslog facility\n try:\n facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]\n except KeyError:\n raise RuntimeError(\"unknown facility name\")\n\n # parse syslog address\n socktype, addr = parse_syslog_address(cfg.syslog_addr)\n\n # finally setup the syslog handler\n if sys.version_info >= (2, 7):\n h = logging.handlers.SysLogHandler(address=addr,\n facility=facility, socktype=socktype)\n else:\n # socktype is only supported in 2.7 and sup\n # fix issue #541\n h = logging.handlers.SysLogHandler(address=addr,\n facility=facility)\n\n h.setFormatter(fmt)\n h._gunicorn = True\n log.addHandler(h)\n\n def _get_user(self, environ):\n user = None\n http_auth = environ.get(\"HTTP_AUTHORIZATION\")\n if http_auth and http_auth.startswith('Basic'):\n auth = http_auth.split(\" \", 1)\n if len(auth) == 2:\n try:\n # b64decode doesn't accept unicode in Python < 3.3\n # so we need to convert it to a byte string\n auth = base64.b64decode(auth[1].strip().encode('utf-8'))\n if PY3: # b64decode returns a byte string in Python 3\n auth = auth.decode('utf-8')\n auth = auth.split(\":\", 1)\n except TypeError as exc:\n self.debug(\"Couldn't get username: %s\", exc)\n return user\n except binascii.Error as exc:\n self.debug(\"Couldn't get username: %s\", exc)\n return user\n if len(auth) == 2:\n user = auth[0]\n return user\n", "path": "gunicorn/glogging.py" } ]
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport base64\nimport binascii\nimport time\nimport logging\nlogging.Logger.manager.emittedNoHandlerWarning = 1\nfrom logging.config import fileConfig\ntry:\n from logging.config import dictConfig\nexcept ImportError:\n # python 2.6\n dictConfig = None\nimport os\nimport socket\nimport sys\nimport threading\nimport traceback\n\nfrom gunicorn import util\nfrom gunicorn.six import PY3, string_types\n\n\n# syslog facility codes\nSYSLOG_FACILITIES = {\n \"auth\": 4,\n \"authpriv\": 10,\n \"cron\": 9,\n \"daemon\": 3,\n \"ftp\": 11,\n \"kern\": 0,\n \"lpr\": 6,\n \"mail\": 2,\n \"news\": 7,\n \"security\": 4, # DEPRECATED\n \"syslog\": 5,\n \"user\": 1,\n \"uucp\": 8,\n \"local0\": 16,\n \"local1\": 17,\n \"local2\": 18,\n \"local3\": 19,\n \"local4\": 20,\n \"local5\": 21,\n \"local6\": 22,\n \"local7\": 23\n }\n\n\nCONFIG_DEFAULTS = dict(\n version=1,\n disable_existing_loggers=False,\n\n loggers={\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"gunicorn.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"gunicorn.error\"\n },\n\n \"gunicorn.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"qualname\": \"gunicorn.access\"\n }\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"sys.stdout\"\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"sys.stderr\"\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\"\n }\n }\n)\n\n\ndef loggers():\n \"\"\" get list of all loggers \"\"\"\n root = logging.root\n existing = root.manager.loggerDict.keys()\n return [logging.getLogger(name) for name in existing]\n\n\nclass SafeAtoms(dict):\n\n def __init__(self, atoms):\n dict.__init__(self)\n for key, value in atoms.items():\n if isinstance(value, string_types):\n self[key] = value.replace('\"', '\\\\\"')\n else:\n self[key] = value\n\n def __getitem__(self, k):\n if k.startswith(\"{\"):\n kl = k.lower()\n if kl in self:\n return super(SafeAtoms, self).__getitem__(kl)\n else:\n return \"-\"\n if k in self:\n return super(SafeAtoms, self).__getitem__(k)\n else:\n return '-'\n\n\ndef parse_syslog_address(addr):\n\n # unix domain socket type depends on backend\n # SysLogHandler will try both when given None\n if addr.startswith(\"unix://\"):\n sock_type = None\n\n # set socket type only if explicitly requested\n parts = addr.split(\"#\", 1)\n if len(parts) == 2:\n addr = parts[0]\n if parts[1] == \"dgram\":\n sock_type = socket.SOCK_DGRAM\n\n return (sock_type, addr.split(\"unix://\")[1])\n\n if addr.startswith(\"udp://\"):\n addr = addr.split(\"udp://\")[1]\n socktype = socket.SOCK_DGRAM\n elif addr.startswith(\"tcp://\"):\n addr = addr.split(\"tcp://\")[1]\n socktype = socket.SOCK_STREAM\n else:\n raise RuntimeError(\"invalid syslog address\")\n\n if '[' in addr and ']' in addr:\n host = addr.split(']')[0][1:].lower()\n elif ':' in addr:\n host = addr.split(':')[0].lower()\n elif addr == \"\":\n host = \"localhost\"\n else:\n host = addr.lower()\n\n addr = addr.split(']')[-1]\n if \":\" in addr:\n port = addr.split(':', 1)[1]\n if not port.isdigit():\n raise RuntimeError(\"%r 
is not a valid port number.\" % port)\n port = int(port)\n else:\n port = 514\n\n return (socktype, (host, port))\n\n\nclass Logger(object):\n\n LOG_LEVELS = {\n \"critical\": logging.CRITICAL,\n \"error\": logging.ERROR,\n \"warning\": logging.WARNING,\n \"info\": logging.INFO,\n \"debug\": logging.DEBUG\n }\n loglevel = logging.INFO\n\n error_fmt = r\"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\"\n datefmt = r\"[%Y-%m-%d %H:%M:%S %z]\"\n\n access_fmt = \"%(message)s\"\n syslog_fmt = \"[%(process)d] %(message)s\"\n\n atoms_wrapper_class = SafeAtoms\n\n def __init__(self, cfg):\n self.error_log = logging.getLogger(\"gunicorn.error\")\n self.error_log.propagate = False\n self.access_log = logging.getLogger(\"gunicorn.access\")\n self.access_log.propagate = False\n self.error_handlers = []\n self.access_handlers = []\n self.logfile = None\n self.lock = threading.Lock()\n self.cfg = cfg\n self.setup(cfg)\n\n def setup(self, cfg):\n self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)\n self.error_log.setLevel(self.loglevel)\n self.access_log.setLevel(logging.INFO)\n\n # set gunicorn.error handler\n if self.cfg.capture_output and cfg.errorlog != \"-\":\n for stream in sys.stdout, sys.stderr:\n stream.flush()\n\n self.logfile = open(cfg.errorlog, 'a+')\n os.dup2(self.logfile.fileno(), sys.stdout.fileno())\n os.dup2(self.logfile.fileno(), sys.stderr.fileno())\n\n self._set_handler(self.error_log, cfg.errorlog,\n logging.Formatter(self.error_fmt, self.datefmt))\n\n # set gunicorn.access handler\n if cfg.accesslog is not None:\n self._set_handler(self.access_log, cfg.accesslog,\n fmt=logging.Formatter(self.access_fmt), stream=sys.stdout)\n\n # set syslog handler\n if cfg.syslog:\n self._set_syslog_handler(\n self.error_log, cfg, self.syslog_fmt, \"error\"\n )\n if not cfg.disable_redirect_access_to_syslog:\n self._set_syslog_handler(\n self.access_log, cfg, self.syslog_fmt, \"access\"\n )\n\n if dictConfig is None and cfg.logconfig_dict:\n util.warn(\"Dictionary-based log configuration requires \"\n \"Python 2.7 or above.\")\n\n if dictConfig and cfg.logconfig_dict:\n config = CONFIG_DEFAULTS.copy()\n config.update(cfg.logconfig_dict)\n try:\n dictConfig(config)\n except (\n AttributeError,\n ImportError,\n ValueError,\n TypeError\n ) as exc:\n raise RuntimeError(str(exc))\n elif cfg.logconfig:\n if os.path.exists(cfg.logconfig):\n defaults = CONFIG_DEFAULTS.copy()\n defaults['__file__'] = cfg.logconfig\n defaults['here'] = os.path.dirname(cfg.logconfig)\n fileConfig(cfg.logconfig, defaults=defaults,\n disable_existing_loggers=False)\n else:\n msg = \"Error: log config '%s' not found\"\n raise RuntimeError(msg % cfg.logconfig)\n\n def critical(self, msg, *args, **kwargs):\n self.error_log.critical(msg, *args, **kwargs)\n\n def error(self, msg, *args, **kwargs):\n self.error_log.error(msg, *args, **kwargs)\n\n def warning(self, msg, *args, **kwargs):\n self.error_log.warning(msg, *args, **kwargs)\n\n def info(self, msg, *args, **kwargs):\n self.error_log.info(msg, *args, **kwargs)\n\n def debug(self, msg, *args, **kwargs):\n self.error_log.debug(msg, *args, **kwargs)\n\n def exception(self, msg, *args, **kwargs):\n self.error_log.exception(msg, *args, **kwargs)\n\n def log(self, lvl, msg, *args, **kwargs):\n if isinstance(lvl, string_types):\n lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)\n self.error_log.log(lvl, msg, *args, **kwargs)\n\n def atoms(self, resp, req, environ, request_time):\n \"\"\" Gets atoms for log formating.\n \"\"\"\n status = resp.status\n 
if isinstance(status, str):\n status = status.split(None, 1)[0]\n atoms = {\n 'h': environ.get('REMOTE_ADDR', '-'),\n 'l': '-',\n 'u': self._get_user(environ) or '-',\n 't': self.now(),\n 'r': \"%s %s %s\" % (environ['REQUEST_METHOD'],\n environ['RAW_URI'], environ[\"SERVER_PROTOCOL\"]),\n 's': status,\n 'm': environ.get('REQUEST_METHOD'),\n 'U': environ.get('PATH_INFO'),\n 'q': environ.get('QUERY_STRING'),\n 'H': environ.get('SERVER_PROTOCOL'),\n 'b': getattr(resp, 'sent', None) and str(resp.sent) or '-',\n 'B': getattr(resp, 'sent', None),\n 'f': environ.get('HTTP_REFERER', '-'),\n 'a': environ.get('HTTP_USER_AGENT', '-'),\n 'T': request_time.seconds,\n 'D': (request_time.seconds*1000000) + request_time.microseconds,\n 'L': \"%d.%06d\" % (request_time.seconds, request_time.microseconds),\n 'p': \"<%s>\" % os.getpid()\n }\n\n # add request headers\n if hasattr(req, 'headers'):\n req_headers = req.headers\n else:\n req_headers = req\n\n if hasattr(req_headers, \"items\"):\n req_headers = req_headers.items()\n\n atoms.update(dict([(\"{%s}i\" % k.lower(), v) for k, v in req_headers]))\n\n resp_headers = resp.headers\n if hasattr(resp_headers, \"items\"):\n resp_headers = resp_headers.items()\n\n # add response headers\n atoms.update(dict([(\"{%s}o\" % k.lower(), v) for k, v in resp_headers]))\n\n # add environ variables\n environ_variables = environ.items()\n atoms.update(dict([(\"{%s}e\" % k.lower(), v) for k, v in environ_variables]))\n\n return atoms\n\n def access(self, resp, req, environ, request_time):\n \"\"\" See http://httpd.apache.org/docs/2.0/logs.html#combined\n for format details\n \"\"\"\n\n if not (self.cfg.accesslog or self.cfg.logconfig or\n self.cfg.logconfig_dict or\n (self.cfg.syslog and not self.cfg.disable_access_log_redirection)):\n return\n\n # wrap atoms:\n # - make sure atoms will be test case insensitively\n # - if atom doesn't exist replace it by '-'\n safe_atoms = self.atoms_wrapper_class(self.atoms(resp, req, environ,\n request_time))\n\n try:\n self.access_log.info(self.cfg.access_log_format, safe_atoms)\n except:\n self.error(traceback.format_exc())\n\n def now(self):\n \"\"\" return date in Apache Common Log Format \"\"\"\n return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')\n\n def reopen_files(self):\n if self.cfg.capture_output and self.cfg.errorlog != \"-\":\n for stream in sys.stdout, sys.stderr:\n stream.flush()\n\n with self.lock:\n if self.logfile is not None:\n self.logfile.close()\n self.logfile = open(self.cfg.errorlog, 'a+')\n os.dup2(self.logfile.fileno(), sys.stdout.fileno())\n os.dup2(self.logfile.fileno(), sys.stderr.fileno())\n\n\n for log in loggers():\n for handler in log.handlers:\n if isinstance(handler, logging.FileHandler):\n handler.acquire()\n try:\n if handler.stream:\n handler.stream.close()\n handler.stream = open(handler.baseFilename,\n handler.mode)\n finally:\n handler.release()\n\n def close_on_exec(self):\n for log in loggers():\n for handler in log.handlers:\n if isinstance(handler, logging.FileHandler):\n handler.acquire()\n try:\n if handler.stream:\n util.close_on_exec(handler.stream.fileno())\n finally:\n handler.release()\n\n def _get_gunicorn_handler(self, log):\n for h in log.handlers:\n if getattr(h, \"_gunicorn\", False):\n return h\n\n def _set_handler(self, log, output, fmt, stream=None):\n # remove previous gunicorn log handler\n h = self._get_gunicorn_handler(log)\n if h:\n log.handlers.remove(h)\n\n if output is not None:\n if output == \"-\":\n h = logging.StreamHandler(stream)\n else:\n 
util.check_is_writeable(output)\n h = logging.FileHandler(output)\n # make sure the user can reopen the file\n try:\n os.chown(h.baseFilename, self.cfg.user, self.cfg.group)\n except OSError:\n # it's probably OK there, we assume the user has given\n # /dev/null as a parameter.\n pass\n\n h.setFormatter(fmt)\n h._gunicorn = True\n log.addHandler(h)\n\n def _set_syslog_handler(self, log, cfg, fmt, name):\n # setup format\n if not cfg.syslog_prefix:\n prefix = cfg.proc_name.replace(\":\", \".\")\n else:\n prefix = cfg.syslog_prefix\n\n prefix = \"gunicorn.%s.%s\" % (prefix, name)\n\n # set format\n fmt = logging.Formatter(r\"%s: %s\" % (prefix, fmt))\n\n # syslog facility\n try:\n facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]\n except KeyError:\n raise RuntimeError(\"unknown facility name\")\n\n # parse syslog address\n socktype, addr = parse_syslog_address(cfg.syslog_addr)\n\n # finally setup the syslog handler\n if sys.version_info >= (2, 7):\n h = logging.handlers.SysLogHandler(address=addr,\n facility=facility, socktype=socktype)\n else:\n # socktype is only supported in 2.7 and sup\n # fix issue #541\n h = logging.handlers.SysLogHandler(address=addr,\n facility=facility)\n\n h.setFormatter(fmt)\n h._gunicorn = True\n log.addHandler(h)\n\n def _get_user(self, environ):\n user = None\n http_auth = environ.get(\"HTTP_AUTHORIZATION\")\n if http_auth and http_auth.startswith('Basic'):\n auth = http_auth.split(\" \", 1)\n if len(auth) == 2:\n try:\n # b64decode doesn't accept unicode in Python < 3.3\n # so we need to convert it to a byte string\n auth = base64.b64decode(auth[1].strip().encode('utf-8'))\n if PY3: # b64decode returns a byte string in Python 3\n auth = auth.decode('utf-8')\n auth = auth.split(\":\", 1)\n except TypeError as exc:\n self.debug(\"Couldn't get username: %s\", exc)\n return user\n except binascii.Error as exc:\n self.debug(\"Couldn't get username: %s\", exc)\n return user\n if len(auth) == 2:\n user = auth[0]\n return user\n", "path": "gunicorn/glogging.py" } ]
diff --git a/gunicorn/glogging.py b/gunicorn/glogging.py index f5d4cfd0d..88f0c1336 100644 --- a/gunicorn/glogging.py +++ b/gunicorn/glogging.py @@ -339,6 +339,7 @@ def access(self, resp, req, environ, request_time): """ if not (self.cfg.accesslog or self.cfg.logconfig or + self.cfg.logconfig_dict or (self.cfg.syslog and not self.cfg.disable_access_log_redirection)): return
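The patch adds one clause to that early-return guard. A stand-alone comparison of the condition before and after, using a plain namespace in place of gunicorn's config object (the attribute names are taken from the code above; the values are hypothetical):

```python
# Stand-alone comparison of the early-return guard before and after the patch.
# `cfg` stands in for gunicorn's config object.
from types import SimpleNamespace

cfg = SimpleNamespace(
    accesslog=None,                        # no --access-logfile given
    logconfig=None,                        # no classic logconfig file
    logconfig_dict={"version": 1},         # logging configured via a dict
    syslog=False,
    disable_access_log_redirection=False,
)

def should_skip_before(cfg):
    return not (cfg.accesslog or cfg.logconfig or
                (cfg.syslog and not cfg.disable_access_log_redirection))

def should_skip_after(cfg):
    return not (cfg.accesslog or cfg.logconfig or
                cfg.logconfig_dict or
                (cfg.syslog and not cfg.disable_access_log_redirection))

print(should_skip_before(cfg))   # True  -> access() returned early, no record
print(should_skip_after(cfg))    # False -> access_log.info(...) is reached
```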
cisagov__manage.get.gov-959
Fix for checkbox accessibility no longer working
### Current Behavior
Checkboxes in the django admin superuser view are no longer generated with an associated label.
### Expected Behavior
Expect to see accessible checkboxes in django admin, no missing columns in either superuser or staff views.
### Steps to Reproduce
1. Log in as superuser
2. Go to list view on a model
3. Run ANDI or inspect checkboxes
### Environment
_No response_
### Additional Context
Traced this to the fix for missing columns in staff view. The check {% if results.0.form %} did not work and failed silently. Have a fix for this. Will prioritize implementation and deployment to staging since we have some accessibility testing in progress.
### Issue Links
_No response_
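The fix described under "Additional Context" swaps the `{% if results.0.form %}` test for a template filter that looks for a checkbox input in the rendered row HTML (the filter is added in the patched `custom_filters.py` further down). A pure-Python sketch of that check, using made-up stand-ins for the strings Django admin renders, so it runs without Django:

```python
# Pure-Python sketch of the filter's core check: does any cell in the rendered
# result row contain an <input type="checkbox">?  The HTML strings below are
# made-up stand-ins for what Django admin renders.
import re

def contains_checkbox(html_list):
    for html_string in html_list:
        if re.search(r'<input[^>]*type="checkbox"', html_string):
            return True
    return False

superuser_row = [
    '<td class="action-checkbox"><input type="checkbox" name="_selected_action"></td>',
    '<th class="field-name"><a href="/admin/model/1/change/">example.gov</a></th>',
]
staff_row = [
    '<th class="field-name"><a href="/admin/model/1/change/">example.gov</a></th>',
]

print(contains_checkbox(superuser_row))  # True  -> render the select-all column
print(contains_checkbox(staff_row))      # False -> omit it, keeping columns aligned
```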
[ { "content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n", "path": "src/registrar/templatetags/custom_filters.py" } ]
[ { "content": "from django import template\nimport re\n\nregister = template.Library()\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n", "path": "src/registrar/templatetags/custom_filters.py" } ]
diff --git a/src/registrar/templates/admin/change_list_results.html b/src/registrar/templates/admin/change_list_results.html index 9ee3f9f59..831350888 100644 --- a/src/registrar/templates/admin/change_list_results.html +++ b/src/registrar/templates/admin/change_list_results.html @@ -17,7 +17,7 @@ <thead> <tr> -{% if results.0.form %} +{% if results.0|contains_checkbox %} {# .gov - hardcode the select all checkbox #} <th scope="col" class="action-checkbox-column" title="Toggle all"> <div class="text"> diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py index f16408bf8..3614db18e 100644 --- a/src/registrar/templatetags/custom_filters.py +++ b/src/registrar/templatetags/custom_filters.py @@ -40,3 +40,11 @@ def slice_after(value, substring): result = value[index + len(substring) :] return result return value + + [email protected] +def contains_checkbox(html_list): + for html_string in html_list: + if re.search(r'<input[^>]*type="checkbox"', html_string): + return True + return False diff --git a/src/registrar/tests/test_templatetags.py b/src/registrar/tests/test_templatetags.py index 36325ab5d..d5f8523c8 100644 --- a/src/registrar/tests/test_templatetags.py +++ b/src/registrar/tests/test_templatetags.py @@ -8,6 +8,7 @@ extract_a_text, find_index, slice_after, + contains_checkbox, ) @@ -83,3 +84,21 @@ def test_slice_after(self): self.assertEqual( result, value ) # Should return the original value if substring not found + + def test_contains_checkbox_with_checkbox(self): + # Test the filter when HTML list contains a checkbox + html_list = [ + '<input type="checkbox" name="_selected_action">', + "<div>Some other HTML content</div>", + ] + result = contains_checkbox(html_list) + self.assertTrue(result) # Expecting True + + def test_contains_checkbox_without_checkbox(self): + # Test the filter when HTML list does not contain a checkbox + html_list = [ + "<div>Some HTML content without checkbox</div>", + "<p>More HTML content</p>", + ] + result = contains_checkbox(html_list) + self.assertFalse(result) # Expecting False
pymedusa__Medusa-9537
Error message "Failed parsing provider" & "list index out of range"
**Describe the bug**
Error message:
`2020-12-05 07:52:25 ERROR SEARCHQUEUE-BACKLOG-260586 :: [Beyond-HD] :: [ce68da5] Failed parsing provider.
Traceback (most recent call last):
File "/Applications/Medusa/medusa/providers/torrent/html/beyondhd.py", line 128, in parse
download_url = urljoin(self.url, cells[2].find('a')['href'])
IndexError: list index out of range`

**Medusa (please complete the following information):**
 - OS: macOS Catalina 10.15.7
 - Branch: master
 - Commit: ce68da57b3878591f77c21bb2acf28e6a58269fa
 - Python version: 3.8.5
 - Database version: 44.17
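The traceback shows `cells[2]` being read from a row that has fewer than three `<td>` cells, while the guard in the parser only skipped rows with fewer than two. A sketch of that off-by-one with plain lists standing in for the parsed cells (what the short rows actually contain on the site is not shown here; any two-cell row would trigger it):

```python
# The off-by-one behind the IndexError, with plain lists standing in for the
# parsed <td> cells of a table row (no BeautifulSoup needed).

def parse_row_before(cells):
    if len(cells) < 2:          # old guard: lets 2-cell rows through...
        return None
    return cells[2]             # ...then indexes the third cell -> IndexError

def parse_row_after(cells):
    if len(cells) < 3:          # new guard: the row must have the cell we index
        return None
    return cells[2]

short_row = ["<td>header</td>", "<td>title</td>"]        # hypothetical 2-cell row
full_row = ["<td>cat</td>", "<td>title</td>", "<td>download link</td>"]

try:
    parse_row_before(short_row)
except IndexError as exc:
    print(f"before the fix: IndexError({exc})")

print(parse_row_after(short_row))   # None -> row skipped
print(parse_row_after(full_row))    # '<td>download link</td>'
```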
[ { "content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BeyondHDProvider(TorrentProvider):\n \"\"\"Beyond-hd Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BeyondHDProvider, self).__init__('Beyond-HD')\n\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('remember_web_[**long_hash**]',)\n\n self.url = 'https://beyond-hd.me'\n self.urls = {\n 'login': urljoin(self.url, 'login'),\n 'search': urljoin(self.url, 'torrents'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, *args, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n search_params = {\n 'categories[]': 2,\n 'sorting': 'created_at',\n 'qty': '100',\n 'direction': 'desc',\n 'doSearch': 'Search'\n }\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n search_params['search'] = search_string\n\n if mode == 'season':\n search_params['pack'] = 1\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if html.find('div', class_='table-torrents'):\n theme = 'modern'\n torrent_table = html.find('div', class_='table-torrents').find('table')\n else:\n theme = 'classic'\n torrent_table = html.find('div', class_='table-responsive').find('table')\n\n torrent_rows = torrent_table('tr') if torrent_table else []\n labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]\n # For the classic theme, the tr don't match the td.\n if theme == 'classic':\n del labels[3]\n\n # Continue only if one release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for result in torrent_rows[1:]:\n cells = result('td')\n\n try:\n if len(cells) < 2:\n continue\n\n link = cells[1].find('a')\n download_url = urljoin(self.url, cells[2].find('a')['href'])\n title = link.get_text(strip=True)\n if not all([title, download_url]):\n continue\n\n seeders = int(cells[labels.index('S')].find('span').get_text())\n leechers = int(cells[labels.index('L')].find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = cells[labels.index('Size')].find('span').get_text()\n size = convert_size(torrent_size, units=units) or -1\n\n pubdate_raw = cells[labels.index('Age')].find('span').get_text()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('Login now')\n\n def check_required_cookies(self):\n \"\"\"\n Check if we have the required cookies in the requests sessions object.\n\n Meaning that we've already successfully authenticated once, and we don't need to go through this again.\n Note! This doesn't mean the cookies are correct!\n \"\"\"\n return False\n\n\nprovider = BeyondHDProvider()\n", "path": "medusa/providers/torrent/html/beyondhd.py" } ]
[ { "content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BeyondHDProvider(TorrentProvider):\n \"\"\"Beyond-hd Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BeyondHDProvider, self).__init__('Beyond-HD')\n\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('remember_web_[**long_hash**]',)\n\n self.url = 'https://beyond-hd.me'\n self.urls = {\n 'login': urljoin(self.url, 'login'),\n 'search': urljoin(self.url, 'torrents'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, *args, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n search_params = {\n 'categories[]': 2,\n 'sorting': 'created_at',\n 'qty': '100',\n 'direction': 'desc',\n 'doSearch': 'Search'\n }\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n search_params['search'] = search_string\n\n if mode == 'season':\n search_params['pack'] = 1\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if html.find('div', class_='table-torrents'):\n theme = 'modern'\n torrent_table = html.find('div', class_='table-torrents').find('table')\n else:\n theme = 'classic'\n torrent_table = html.find('div', class_='table-responsive').find('table')\n\n torrent_rows = torrent_table('tr') if torrent_table else []\n labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]\n # For the classic theme, the tr don't match the td.\n if theme == 'classic':\n del labels[3]\n\n # Continue only if one release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for result in torrent_rows[1:]:\n cells = result('td')\n\n try:\n if len(cells) < 3:\n continue\n\n link = cells[1].find('a')\n download_url = urljoin(self.url, cells[2].find('a')['href'])\n title = link.get_text(strip=True)\n if not all([title, download_url]):\n continue\n\n seeders = int(cells[labels.index('S')].find('span').get_text())\n leechers = int(cells[labels.index('L')].find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = cells[labels.index('Size')].find('span').get_text()\n size = convert_size(torrent_size, units=units) or -1\n\n pubdate_raw = cells[labels.index('Age')].find('span').get_text()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('Login now')\n\n def check_required_cookies(self):\n \"\"\"\n Check if we have the required cookies in the requests sessions object.\n\n Meaning that we've already successfully authenticated once, and we don't need to go through this again.\n Note! This doesn't mean the cookies are correct!\n \"\"\"\n return False\n\n\nprovider = BeyondHDProvider()\n", "path": "medusa/providers/torrent/html/beyondhd.py" } ]
diff --git a/medusa/providers/torrent/html/beyondhd.py b/medusa/providers/torrent/html/beyondhd.py index 47be8c1a10..3978d9ef4c 100644 --- a/medusa/providers/torrent/html/beyondhd.py +++ b/medusa/providers/torrent/html/beyondhd.py @@ -121,7 +121,7 @@ def parse(self, data, mode): cells = result('td') try: - if len(cells) < 2: + if len(cells) < 3: continue link = cells[1].find('a')
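The one-line change above tightens the guard at the top of the row loop: the parser goes on to index both `cells[1]` (the title link) and `cells[2]` (the download link), so a row needs at least three cells before either lookup is safe; with the old `len(cells) < 2` check, a two-cell row slipped through and tripped the `IndexError` branch of the surrounding `try/except`. A minimal sketch of that guard, using made-up rows rather than real Beyond-HD markup:

```python
# Hypothetical rows standing in for parsed <td> cells; not real provider markup.
rows = [
    ["cat", "<a>Show.S01E01</a>"],                       # malformed: only 2 cells
    ["cat", "<a>Show.S01E01</a>", "<a href='/dl/1'>"],   # usable: 3 cells
]

for cells in rows:
    if len(cells) < 3:
        # With the old `< 2` guard this row would pass, and cells[2] below
        # would raise IndexError inside the provider's try/except.
        continue
    title_cell, download_cell = cells[1], cells[2]
    print(title_cell, download_cell)
```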
pyca__cryptography-8318
Incorrect docstrings in x25519 and x448 `.public_key()` methods See: https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64 https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64 In both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.
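A minimal illustration of the distinction the report draws, using the library's public API: `public_key()` returns an `X25519PublicKey` object, while the "serialized bytes of the public key" that the old docstring described come from calling `public_bytes()` on that object.

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.x25519 import (
    X25519PrivateKey,
    X25519PublicKey,
)

private_key = X25519PrivateKey.generate()

# What the method actually returns: a key object, not bytes.
public_key = private_key.public_key()
assert isinstance(public_key, X25519PublicKey)

# The serialized bytes come from public_bytes() instead.
raw = public_key.public_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PublicFormat.Raw,
)
assert isinstance(raw, bytes) and len(raw) == 32
```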
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X25519PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X25519PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X25519PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x25519_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", "path": "src/cryptography/hazmat/primitives/asymmetric/x25519.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X25519PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X25519PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X25519PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x25519_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n Returns the public key assosciated with this private key\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", "path": "src/cryptography/hazmat/primitives/asymmetric/x25519.py" } ]
diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py index 690af78c2152..d1347b883f37 100644 --- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py +++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py @@ -60,7 +60,7 @@ def from_private_bytes(cls, data: bytes) -> "X25519PrivateKey": @abc.abstractmethod def public_key(self) -> X25519PublicKey: """ - The serialized bytes of the public key. + Returns the public key assosciated with this private key """ @abc.abstractmethod
huggingface__text-generation-inference-1617
Incorrectly multiplied timeout by 60 in the asynchronous client ### System Info I'm testing TGI using Docker. Below is the exact command I'm utilizing: ```console docker run --gpus '"device=1,2"' --shm-size 1g -p 8000:80 -v ~/tgi-test:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id mistralai/Mistral-7B-v0.1 --max-input-length 8000 --max-total-tokens 8001 ``` ### Information - [ ] Docker - [ ] The CLI directly ### Tasks - [ ] An officially supported command - [ ] My own modifications ### Reproduction Given the generation request: ```python async def test(): start = time.time() try: response = await client.generate('1' * 6_000, max_new_tokens=1_800) except Exception as ex: pass print(time.time() - start) ``` And this async client definition: ```python client = AsyncClient('http://localhost:8000', timeout=1) ``` It doesn't timeout after 1 second: ```python >>> await test() 60.88534379005432 ``` But if we create a client with a timeout of 2/60: ```python client = AsyncClient('http://localhost:8000', timeout=(2/60)) ``` It does timeout after 2 seconds: ```python >>> await test() 2.0035104751586914 ``` ### Expected behavior The function should have timed out after 1 second with this client definition: ```python client = AsyncClient('http://localhost:8000', timeout=1) ```
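The arithmetic behind the two measurements: `AsyncClient.__init__` wraps its `timeout` argument in aiohttp's `ClientTimeout`, but multiplies it by 60 first, so `timeout=1` becomes a 60-second total timeout while `timeout=2/60` becomes the 2 seconds observed above. A minimal sketch of that conversion; dropping the multiplier restores the documented seconds-based behaviour.

```python
from aiohttp import ClientTimeout

timeout = 1  # seconds, as documented

buggy = ClientTimeout(timeout * 60)  # what the async client currently builds -> 60 s total
fixed = ClientTimeout(timeout)       # what the docstring promises            -> 1 s total

assert buggy.total == 60
assert fixed.total == 1
```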
[ { "content": "import json\nimport requests\n\nfrom aiohttp import ClientSession, ClientTimeout\nfrom pydantic import ValidationError\nfrom typing import Dict, Optional, List, AsyncIterator, Iterator, Union\n\nfrom text_generation.types import (\n StreamResponse,\n Response,\n Request,\n Parameters,\n Grammar,\n ChatRequest,\n ChatCompletionChunk,\n ChatComplete,\n Message,\n Tool,\n)\nfrom text_generation.errors import parse_error\n\n\nclass Client:\n \"\"\"Client to make calls to a text-generation-inference instance\n\n Example:\n\n ```python\n >>> from text_generation import Client\n\n >>> client = Client(\"https://api-inference.huggingface.co/models/bigscience/bloomz\")\n >>> client.generate(\"Why is the sky blue?\").generated_text\n ' Rayleigh scattering'\n\n >>> result = \"\"\n >>> for response in client.generate_stream(\"Why is the sky blue?\"):\n >>> if not response.token.special:\n >>> result += response.token.text\n >>> result\n ' Rayleigh scattering'\n ```\n \"\"\"\n\n def __init__(\n self,\n base_url: str,\n headers: Optional[Dict[str, str]] = None,\n cookies: Optional[Dict[str, str]] = None,\n timeout: int = 10,\n ):\n \"\"\"\n Args:\n base_url (`str`):\n text-generation-inference instance base url\n headers (`Optional[Dict[str, str]]`):\n Additional headers\n cookies (`Optional[Dict[str, str]]`):\n Cookies to include in the requests\n timeout (`int`):\n Timeout in seconds\n \"\"\"\n self.base_url = base_url\n self.headers = headers\n self.cookies = cookies\n self.timeout = timeout\n\n def chat(\n self,\n messages: List[Message],\n frequency_penalty: Optional[float] = None,\n logit_bias: Optional[List[float]] = None,\n logprobs: Optional[bool] = None,\n top_logprobs: Optional[int] = None,\n max_tokens: Optional[int] = None,\n n: Optional[int] = None,\n presence_penalty: Optional[float] = None,\n stream: bool = False,\n seed: Optional[int] = None,\n temperature: Optional[float] = None,\n top_p: Optional[float] = None,\n tools: Optional[List[Tool]] = None,\n tool_choice: Optional[str] = None,\n ):\n \"\"\"\n Given a list of messages, generate a response asynchronously\n\n Args:\n messages (`List[Message]`):\n List of messages\n frequency_penalty (`float`):\n The parameter for frequency penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n logit_bias (`List[float]`):\n Adjust the likelihood of specified tokens\n logprobs (`bool`):\n Include log probabilities in the response\n top_logprobs (`int`):\n Include the `n` most likely tokens at each step\n max_tokens (`int`):\n Maximum number of generated tokens\n n (`int`):\n Generate `n` completions\n presence_penalty (`float`):\n The parameter for presence penalty. 0.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n stream (`bool`):\n Stream the response\n seed (`int`):\n Random sampling seed\n temperature (`float`):\n The value used to module the logits distribution.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation\n tools (`List[Tool]`):\n List of tools to use\n tool_choice (`str`):\n The tool to use\n\n \"\"\"\n request = ChatRequest(\n model=\"tgi\",\n messages=messages,\n frequency_penalty=frequency_penalty,\n logit_bias=logit_bias,\n logprobs=logprobs,\n top_logprobs=top_logprobs,\n max_tokens=max_tokens,\n n=n,\n presence_penalty=presence_penalty,\n stream=stream,\n seed=seed,\n temperature=temperature,\n top_p=top_p,\n tools=tools,\n tool_choice=tool_choice,\n )\n if not stream:\n resp = requests.post(\n f\"{self.base_url}/v1/chat/completions\",\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n )\n payload = resp.json()\n if resp.status_code != 200:\n raise parse_error(resp.status_code, payload)\n return ChatComplete(**payload)\n else:\n return self._chat_stream_response(request)\n\n def _chat_stream_response(self, request):\n resp = requests.post(\n f\"{self.base_url}/v1/chat/completions\",\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n stream=True,\n )\n # iterate and print stream\n for byte_payload in resp.iter_lines():\n if byte_payload == b\"\\n\":\n continue\n payload = byte_payload.decode(\"utf-8\")\n if payload.startswith(\"data:\"):\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"\\n\"))\n try:\n response = ChatCompletionChunk(**json_payload)\n yield response\n except ValidationError:\n raise parse_error(resp.status, json_payload)\n\n def generate(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n best_of: Optional[int] = None,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n decoder_input_details: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Response:\n \"\"\"\n Given a prompt, generate the following text\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n best_of (`int`):\n Generate best_of sequences and return the one if the highest token logprobs\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n decoder_input_details (`bool`):\n Return the decoder input token logprobs and ids\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Response: generated response\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=best_of,\n details=True,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n decoder_input_details=decoder_input_details,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=False, parameters=parameters)\n\n resp = requests.post(\n self.base_url,\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n )\n payload = resp.json()\n if resp.status_code != 200:\n raise parse_error(resp.status_code, payload)\n return Response(**payload[0])\n\n def generate_stream(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Iterator[StreamResponse]:\n \"\"\"\n Given a prompt, generate the following stream of tokens\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Iterator[StreamResponse]: stream of generated tokens\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=None,\n details=True,\n decoder_input_details=False,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=True, parameters=parameters)\n\n resp = requests.post(\n self.base_url,\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n stream=True,\n )\n\n if resp.status_code != 200:\n raise parse_error(resp.status_code, resp.json())\n\n # Parse ServerSentEvents\n for byte_payload in resp.iter_lines():\n # Skip line\n if byte_payload == b\"\\n\":\n continue\n\n payload = byte_payload.decode(\"utf-8\")\n\n # Event data\n if payload.startswith(\"data:\"):\n # Decode payload\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"/n\"))\n # Parse payload\n try:\n response = StreamResponse(**json_payload)\n except ValidationError:\n # If we failed to parse the payload, then it is an error payload\n raise parse_error(resp.status_code, json_payload)\n yield response\n\n\nclass AsyncClient:\n \"\"\"Asynchronous Client to make calls to a text-generation-inference instance\n\n Example:\n\n ```python\n >>> from text_generation import AsyncClient\n\n >>> client = AsyncClient(\"https://api-inference.huggingface.co/models/bigscience/bloomz\")\n >>> response = await client.generate(\"Why is the sky blue?\")\n >>> response.generated_text\n ' Rayleigh scattering'\n\n >>> result = \"\"\n >>> async for response in client.generate_stream(\"Why is the sky blue?\"):\n >>> if not response.token.special:\n >>> result += response.token.text\n >>> result\n ' Rayleigh scattering'\n ```\n \"\"\"\n\n def __init__(\n self,\n base_url: str,\n headers: Optional[Dict[str, str]] = None,\n cookies: Optional[Dict[str, str]] = None,\n timeout: int = 10,\n ):\n \"\"\"\n Args:\n base_url (`str`):\n text-generation-inference instance base url\n headers (`Optional[Dict[str, str]]`):\n Additional headers\n cookies (`Optional[Dict[str, str]]`):\n Cookies to include in the requests\n timeout (`int`):\n Timeout in seconds\n \"\"\"\n self.base_url = base_url\n 
self.headers = headers\n self.cookies = cookies\n self.timeout = ClientTimeout(timeout * 60)\n\n async def chat(\n self,\n messages: List[Message],\n frequency_penalty: Optional[float] = None,\n logit_bias: Optional[List[float]] = None,\n logprobs: Optional[bool] = None,\n top_logprobs: Optional[int] = None,\n max_tokens: Optional[int] = None,\n n: Optional[int] = None,\n presence_penalty: Optional[float] = None,\n stream: bool = False,\n seed: Optional[int] = None,\n temperature: Optional[float] = None,\n top_p: Optional[float] = None,\n tools: Optional[List[Tool]] = None,\n tool_choice: Optional[str] = None,\n ) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]:\n \"\"\"\n Given a list of messages, generate a response asynchronously\n\n Args:\n messages (`List[Message]`):\n List of messages\n frequency_penalty (`float`):\n The parameter for frequency penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n logit_bias (`List[float]`):\n Adjust the likelihood of specified tokens\n logprobs (`bool`):\n Include log probabilities in the response\n top_logprobs (`int`):\n Include the `n` most likely tokens at each step\n max_tokens (`int`):\n Maximum number of generated tokens\n n (`int`):\n Generate `n` completions\n presence_penalty (`float`):\n The parameter for presence penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n stream (`bool`):\n Stream the response\n seed (`int`):\n Random sampling seed\n temperature (`float`):\n The value used to module the logits distribution.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation\n tools (`List[Tool]`):\n List of tools to use\n tool_choice (`str`):\n The tool to use\n\n \"\"\"\n request = ChatRequest(\n model=\"tgi\",\n messages=messages,\n frequency_penalty=frequency_penalty,\n logit_bias=logit_bias,\n logprobs=logprobs,\n top_logprobs=top_logprobs,\n max_tokens=max_tokens,\n n=n,\n presence_penalty=presence_penalty,\n stream=stream,\n seed=seed,\n temperature=temperature,\n top_p=top_p,\n tools=tools,\n tool_choice=tool_choice,\n )\n if not stream:\n return await self._chat_single_response(request)\n else:\n return self._chat_stream_response(request)\n\n async def _chat_single_response(self, request):\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(\n f\"{self.base_url}/v1/chat/completions\", json=request.dict()\n ) as resp:\n payload = await resp.json()\n if resp.status != 200:\n raise parse_error(resp.status, payload)\n return ChatComplete(**payload)\n\n async def _chat_stream_response(self, request):\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(\n f\"{self.base_url}/v1/chat/completions\", json=request.dict()\n ) as resp:\n async for byte_payload in resp.content:\n if byte_payload == b\"\\n\":\n continue\n payload = byte_payload.decode(\"utf-8\")\n if payload.startswith(\"data:\"):\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"\\n\"))\n try:\n response = ChatCompletionChunk(**json_payload)\n yield response\n except ValidationError:\n raise parse_error(resp.status, json_payload)\n\n async def generate(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n best_of: Optional[int] = None,\n 
repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n decoder_input_details: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Response:\n \"\"\"\n Given a prompt, generate the following text asynchronously\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n best_of (`int`):\n Generate best_of sequences and return the one if the highest token logprobs\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n decoder_input_details (`bool`):\n Return the decoder input token logprobs and ids\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Response: generated response\n \"\"\"\n\n # Validate parameters\n parameters = Parameters(\n best_of=best_of,\n details=True,\n decoder_input_details=decoder_input_details,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=False, parameters=parameters)\n\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(self.base_url, json=request.dict()) as resp:\n payload = await resp.json()\n\n if resp.status != 200:\n raise parse_error(resp.status, payload)\n return Response(**payload[0])\n\n async def generate_stream(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> 
AsyncIterator[StreamResponse]:\n \"\"\"\n Given a prompt, generate the following stream of tokens asynchronously\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n AsyncIterator[StreamResponse]: stream of generated tokens\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=None,\n details=True,\n decoder_input_details=False,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=True, parameters=parameters)\n\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(self.base_url, json=request.dict()) as resp:\n if resp.status != 200:\n raise parse_error(resp.status, await resp.json())\n\n # Parse ServerSentEvents\n async for byte_payload in resp.content:\n # Skip line\n if byte_payload == b\"\\n\":\n continue\n\n payload = byte_payload.decode(\"utf-8\")\n\n # Event data\n if payload.startswith(\"data:\"):\n # Decode payload\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"/n\"))\n # Parse payload\n try:\n response = StreamResponse(**json_payload)\n except ValidationError:\n # If we failed to parse the payload, then it is an error payload\n raise parse_error(resp.status, json_payload)\n yield response\n", "path": "clients/python/text_generation/client.py" } ]
[ { "content": "import json\nimport requests\n\nfrom aiohttp import ClientSession, ClientTimeout\nfrom pydantic import ValidationError\nfrom typing import Dict, Optional, List, AsyncIterator, Iterator, Union\n\nfrom text_generation.types import (\n StreamResponse,\n Response,\n Request,\n Parameters,\n Grammar,\n ChatRequest,\n ChatCompletionChunk,\n ChatComplete,\n Message,\n Tool,\n)\nfrom text_generation.errors import parse_error\n\n\nclass Client:\n \"\"\"Client to make calls to a text-generation-inference instance\n\n Example:\n\n ```python\n >>> from text_generation import Client\n\n >>> client = Client(\"https://api-inference.huggingface.co/models/bigscience/bloomz\")\n >>> client.generate(\"Why is the sky blue?\").generated_text\n ' Rayleigh scattering'\n\n >>> result = \"\"\n >>> for response in client.generate_stream(\"Why is the sky blue?\"):\n >>> if not response.token.special:\n >>> result += response.token.text\n >>> result\n ' Rayleigh scattering'\n ```\n \"\"\"\n\n def __init__(\n self,\n base_url: str,\n headers: Optional[Dict[str, str]] = None,\n cookies: Optional[Dict[str, str]] = None,\n timeout: int = 10,\n ):\n \"\"\"\n Args:\n base_url (`str`):\n text-generation-inference instance base url\n headers (`Optional[Dict[str, str]]`):\n Additional headers\n cookies (`Optional[Dict[str, str]]`):\n Cookies to include in the requests\n timeout (`int`):\n Timeout in seconds\n \"\"\"\n self.base_url = base_url\n self.headers = headers\n self.cookies = cookies\n self.timeout = timeout\n\n def chat(\n self,\n messages: List[Message],\n frequency_penalty: Optional[float] = None,\n logit_bias: Optional[List[float]] = None,\n logprobs: Optional[bool] = None,\n top_logprobs: Optional[int] = None,\n max_tokens: Optional[int] = None,\n n: Optional[int] = None,\n presence_penalty: Optional[float] = None,\n stream: bool = False,\n seed: Optional[int] = None,\n temperature: Optional[float] = None,\n top_p: Optional[float] = None,\n tools: Optional[List[Tool]] = None,\n tool_choice: Optional[str] = None,\n ):\n \"\"\"\n Given a list of messages, generate a response asynchronously\n\n Args:\n messages (`List[Message]`):\n List of messages\n frequency_penalty (`float`):\n The parameter for frequency penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n logit_bias (`List[float]`):\n Adjust the likelihood of specified tokens\n logprobs (`bool`):\n Include log probabilities in the response\n top_logprobs (`int`):\n Include the `n` most likely tokens at each step\n max_tokens (`int`):\n Maximum number of generated tokens\n n (`int`):\n Generate `n` completions\n presence_penalty (`float`):\n The parameter for presence penalty. 0.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n stream (`bool`):\n Stream the response\n seed (`int`):\n Random sampling seed\n temperature (`float`):\n The value used to module the logits distribution.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation\n tools (`List[Tool]`):\n List of tools to use\n tool_choice (`str`):\n The tool to use\n\n \"\"\"\n request = ChatRequest(\n model=\"tgi\",\n messages=messages,\n frequency_penalty=frequency_penalty,\n logit_bias=logit_bias,\n logprobs=logprobs,\n top_logprobs=top_logprobs,\n max_tokens=max_tokens,\n n=n,\n presence_penalty=presence_penalty,\n stream=stream,\n seed=seed,\n temperature=temperature,\n top_p=top_p,\n tools=tools,\n tool_choice=tool_choice,\n )\n if not stream:\n resp = requests.post(\n f\"{self.base_url}/v1/chat/completions\",\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n )\n payload = resp.json()\n if resp.status_code != 200:\n raise parse_error(resp.status_code, payload)\n return ChatComplete(**payload)\n else:\n return self._chat_stream_response(request)\n\n def _chat_stream_response(self, request):\n resp = requests.post(\n f\"{self.base_url}/v1/chat/completions\",\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n stream=True,\n )\n # iterate and print stream\n for byte_payload in resp.iter_lines():\n if byte_payload == b\"\\n\":\n continue\n payload = byte_payload.decode(\"utf-8\")\n if payload.startswith(\"data:\"):\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"\\n\"))\n try:\n response = ChatCompletionChunk(**json_payload)\n yield response\n except ValidationError:\n raise parse_error(resp.status, json_payload)\n\n def generate(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n best_of: Optional[int] = None,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n decoder_input_details: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Response:\n \"\"\"\n Given a prompt, generate the following text\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n best_of (`int`):\n Generate best_of sequences and return the one if the highest token logprobs\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n decoder_input_details (`bool`):\n Return the decoder input token logprobs and ids\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Response: generated response\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=best_of,\n details=True,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n decoder_input_details=decoder_input_details,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=False, parameters=parameters)\n\n resp = requests.post(\n self.base_url,\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n )\n payload = resp.json()\n if resp.status_code != 200:\n raise parse_error(resp.status_code, payload)\n return Response(**payload[0])\n\n def generate_stream(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Iterator[StreamResponse]:\n \"\"\"\n Given a prompt, generate the following stream of tokens\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. 
See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Iterator[StreamResponse]: stream of generated tokens\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=None,\n details=True,\n decoder_input_details=False,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=True, parameters=parameters)\n\n resp = requests.post(\n self.base_url,\n json=request.dict(),\n headers=self.headers,\n cookies=self.cookies,\n timeout=self.timeout,\n stream=True,\n )\n\n if resp.status_code != 200:\n raise parse_error(resp.status_code, resp.json())\n\n # Parse ServerSentEvents\n for byte_payload in resp.iter_lines():\n # Skip line\n if byte_payload == b\"\\n\":\n continue\n\n payload = byte_payload.decode(\"utf-8\")\n\n # Event data\n if payload.startswith(\"data:\"):\n # Decode payload\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"/n\"))\n # Parse payload\n try:\n response = StreamResponse(**json_payload)\n except ValidationError:\n # If we failed to parse the payload, then it is an error payload\n raise parse_error(resp.status_code, json_payload)\n yield response\n\n\nclass AsyncClient:\n \"\"\"Asynchronous Client to make calls to a text-generation-inference instance\n\n Example:\n\n ```python\n >>> from text_generation import AsyncClient\n\n >>> client = AsyncClient(\"https://api-inference.huggingface.co/models/bigscience/bloomz\")\n >>> response = await client.generate(\"Why is the sky blue?\")\n >>> response.generated_text\n ' Rayleigh scattering'\n\n >>> result = \"\"\n >>> async for response in client.generate_stream(\"Why is the sky blue?\"):\n >>> if not response.token.special:\n >>> result += response.token.text\n >>> result\n ' Rayleigh scattering'\n ```\n \"\"\"\n\n def __init__(\n self,\n base_url: str,\n headers: Optional[Dict[str, str]] = None,\n cookies: Optional[Dict[str, str]] = None,\n timeout: int = 10,\n ):\n \"\"\"\n Args:\n base_url (`str`):\n text-generation-inference instance base url\n headers (`Optional[Dict[str, str]]`):\n Additional headers\n cookies (`Optional[Dict[str, str]]`):\n Cookies to include in the requests\n timeout (`int`):\n Timeout in seconds\n \"\"\"\n self.base_url = base_url\n 
self.headers = headers\n self.cookies = cookies\n self.timeout = ClientTimeout(timeout)\n\n async def chat(\n self,\n messages: List[Message],\n frequency_penalty: Optional[float] = None,\n logit_bias: Optional[List[float]] = None,\n logprobs: Optional[bool] = None,\n top_logprobs: Optional[int] = None,\n max_tokens: Optional[int] = None,\n n: Optional[int] = None,\n presence_penalty: Optional[float] = None,\n stream: bool = False,\n seed: Optional[int] = None,\n temperature: Optional[float] = None,\n top_p: Optional[float] = None,\n tools: Optional[List[Tool]] = None,\n tool_choice: Optional[str] = None,\n ) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]:\n \"\"\"\n Given a list of messages, generate a response asynchronously\n\n Args:\n messages (`List[Message]`):\n List of messages\n frequency_penalty (`float`):\n The parameter for frequency penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n logit_bias (`List[float]`):\n Adjust the likelihood of specified tokens\n logprobs (`bool`):\n Include log probabilities in the response\n top_logprobs (`int`):\n Include the `n` most likely tokens at each step\n max_tokens (`int`):\n Maximum number of generated tokens\n n (`int`):\n Generate `n` completions\n presence_penalty (`float`):\n The parameter for presence penalty. 0.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n stream (`bool`):\n Stream the response\n seed (`int`):\n Random sampling seed\n temperature (`float`):\n The value used to module the logits distribution.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation\n tools (`List[Tool]`):\n List of tools to use\n tool_choice (`str`):\n The tool to use\n\n \"\"\"\n request = ChatRequest(\n model=\"tgi\",\n messages=messages,\n frequency_penalty=frequency_penalty,\n logit_bias=logit_bias,\n logprobs=logprobs,\n top_logprobs=top_logprobs,\n max_tokens=max_tokens,\n n=n,\n presence_penalty=presence_penalty,\n stream=stream,\n seed=seed,\n temperature=temperature,\n top_p=top_p,\n tools=tools,\n tool_choice=tool_choice,\n )\n if not stream:\n return await self._chat_single_response(request)\n else:\n return self._chat_stream_response(request)\n\n async def _chat_single_response(self, request):\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(\n f\"{self.base_url}/v1/chat/completions\", json=request.dict()\n ) as resp:\n payload = await resp.json()\n if resp.status != 200:\n raise parse_error(resp.status, payload)\n return ChatComplete(**payload)\n\n async def _chat_stream_response(self, request):\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(\n f\"{self.base_url}/v1/chat/completions\", json=request.dict()\n ) as resp:\n async for byte_payload in resp.content:\n if byte_payload == b\"\\n\":\n continue\n payload = byte_payload.decode(\"utf-8\")\n if payload.startswith(\"data:\"):\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"\\n\"))\n try:\n response = ChatCompletionChunk(**json_payload)\n yield response\n except ValidationError:\n raise parse_error(resp.status, json_payload)\n\n async def generate(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n best_of: Optional[int] = None,\n repetition_penalty: 
Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n decoder_input_details: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> Response:\n \"\"\"\n Given a prompt, generate the following text asynchronously\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n best_of (`int`):\n Generate best_of sequences and return the one if the highest token logprobs\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n decoder_input_details (`bool`):\n Return the decoder input token logprobs and ids\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n Response: generated response\n \"\"\"\n\n # Validate parameters\n parameters = Parameters(\n best_of=best_of,\n details=True,\n decoder_input_details=decoder_input_details,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=False, parameters=parameters)\n\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(self.base_url, json=request.dict()) as resp:\n payload = await resp.json()\n\n if resp.status != 200:\n raise parse_error(resp.status, payload)\n return Response(**payload[0])\n\n async def generate_stream(\n self,\n prompt: str,\n do_sample: bool = False,\n max_new_tokens: int = 20,\n repetition_penalty: Optional[float] = None,\n return_full_text: bool = False,\n seed: Optional[int] = None,\n stop_sequences: Optional[List[str]] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n truncate: Optional[int] = None,\n typical_p: Optional[float] = None,\n watermark: bool = False,\n top_n_tokens: Optional[int] = None,\n grammar: Optional[Grammar] = None,\n ) -> AsyncIterator[StreamResponse]:\n 
\"\"\"\n Given a prompt, generate the following stream of tokens asynchronously\n\n Args:\n prompt (`str`):\n Input text\n do_sample (`bool`):\n Activate logits sampling\n max_new_tokens (`int`):\n Maximum number of generated tokens\n repetition_penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. See [this\n paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n return_full_text (`bool`):\n Whether to prepend the prompt to the generated text\n seed (`int`):\n Random sampling seed\n stop_sequences (`List[str]`):\n Stop generating tokens if a member of `stop_sequences` is generated\n temperature (`float`):\n The value used to module the logits distribution.\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n truncate (`int`):\n Truncate inputs tokens to the given size\n typical_p (`float`):\n Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information\n watermark (`bool`):\n Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)\n top_n_tokens (`int`):\n Return the `n` most likely tokens at each step\n\n Returns:\n AsyncIterator[StreamResponse]: stream of generated tokens\n \"\"\"\n # Validate parameters\n parameters = Parameters(\n best_of=None,\n details=True,\n decoder_input_details=False,\n do_sample=do_sample,\n max_new_tokens=max_new_tokens,\n repetition_penalty=repetition_penalty,\n return_full_text=return_full_text,\n seed=seed,\n stop=stop_sequences if stop_sequences is not None else [],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n truncate=truncate,\n typical_p=typical_p,\n watermark=watermark,\n top_n_tokens=top_n_tokens,\n grammar=grammar,\n )\n request = Request(inputs=prompt, stream=True, parameters=parameters)\n\n async with ClientSession(\n headers=self.headers, cookies=self.cookies, timeout=self.timeout\n ) as session:\n async with session.post(self.base_url, json=request.dict()) as resp:\n if resp.status != 200:\n raise parse_error(resp.status, await resp.json())\n\n # Parse ServerSentEvents\n async for byte_payload in resp.content:\n # Skip line\n if byte_payload == b\"\\n\":\n continue\n\n payload = byte_payload.decode(\"utf-8\")\n\n # Event data\n if payload.startswith(\"data:\"):\n # Decode payload\n json_payload = json.loads(payload.lstrip(\"data:\").rstrip(\"/n\"))\n # Parse payload\n try:\n response = StreamResponse(**json_payload)\n except ValidationError:\n # If we failed to parse the payload, then it is an error payload\n raise parse_error(resp.status, json_payload)\n yield response\n", "path": "clients/python/text_generation/client.py" } ]
diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py
index 09660de3cd9..e1de253b780 100644
--- a/clients/python/text_generation/client.py
+++ b/clients/python/text_generation/client.py
@@ -424,7 +424,7 @@ def __init__(
         self.base_url = base_url
         self.headers = headers
         self.cookies = cookies
-        self.timeout = ClientTimeout(timeout * 60)
+        self.timeout = ClientTimeout(timeout)
 
     async def chat(
         self,
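The one-line fix above matters because aiohttp's `ClientTimeout` is expressed in seconds, so multiplying the caller's value by 60 silently turned a 10-second budget into a 10-minute one. A minimal sketch of the corrected behaviour (the URL and timeout value are placeholders, not part of the client above):

```
from aiohttp import ClientSession, ClientTimeout

async def fetch_status(url: str, timeout_s: float = 10) -> int:
    # ClientTimeout's first field is `total`, measured in seconds; the pre-fix
    # code passed `timeout * 60`, inflating the budget by a factor of sixty.
    async with ClientSession(timeout=ClientTimeout(timeout_s)) as session:
        async with session.get(url) as resp:
            return resp.status

# Usage (placeholder URL):
#   import asyncio
#   asyncio.run(fetch_status("http://127.0.0.1:8080/health"))
```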
mkdocs__mkdocs-1998
Fix simple typo: seperate -> separate

# Issue Type

[x] Bug (Typo)

# Steps to Replicate

1. Examine mkdocs/commands/serve.py.
2. Search for `seperate`.

# Expected Behaviour

1. Should read `separate`.
[ { "content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Tornado 5+.\n\n As of Python 3.8, the default event loop on Windows is `proactor`,\n however Tornado requires the old default \"selector\" event loop.\n As Tornado has decided to leave this to users to set, MkDocs needs\n to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n \"\"\"\n if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import WindowsSelectorEventLoopPolicy\n except ImportError:\n pass # Can't assign a policy which doesn't exist.\n else:\n if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super().write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super().get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to seperate the code paths from the --livereload\n # alternative.\n _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload', **kwargs):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. 
The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir,\n **kwargs\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py" } ]
[ { "content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Tornado 5+.\n\n As of Python 3.8, the default event loop on Windows is `proactor`,\n however Tornado requires the old default \"selector\" event loop.\n As Tornado has decided to leave this to users to set, MkDocs needs\n to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n \"\"\"\n if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import WindowsSelectorEventLoopPolicy\n except ImportError:\n pass # Can't assign a policy which doesn't exist.\n else:\n if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super().write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super().get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to separate the code paths from the --livereload\n # alternative.\n _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload', **kwargs):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. 
The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir,\n **kwargs\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py" } ]
diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py
index ba716776a8..21b7ca6c1e 100644
--- a/mkdocs/commands/serve.py
+++ b/mkdocs/commands/serve.py
@@ -80,7 +80,7 @@ def get_web_handlers(self, script):
 
 def _static_server(host, port, site_dir):
 
-    # Importing here to seperate the code paths from the --livereload
+    # Importing here to separate the code paths from the --livereload
     # alternative.
     _init_asyncio_patch()
    from tornado import ioloop
scrapy__scrapy-4585
Downloadable documentation is missing for versions 2.0 and 2.1 on readthedocs.org

For some reason the downloadable documentation on https://readthedocs.org/projects/scrapy/downloads/ is only available up to version 1.8. That's a minor issue, but I think I'm not the only one who prefers to read technical documentation in PDF format (to be able to take notes).
[ { "content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py" } ]
[ { "content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = ['epub.unknown_project_files']\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py" } ]
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 17eba34f337..e4d3f02cc3f 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,4 +1,5 @@
 version: 2
+formats: all
 sphinx:
   configuration: docs/conf.py
   fail_on_warning: true
diff --git a/docs/conf.py b/docs/conf.py
index 8ab38a090c3..468c1d1901b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -100,6 +100,9 @@
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'
 
+# List of Sphinx warnings that will not be raised
+suppress_warnings = ['epub.unknown_project_files']
+
 
 # Options for HTML output
 # -----------------------
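The conf.py change accompanies `formats: all` because the Read the Docs config above keeps `fail_on_warning: true`: once epub output is built, Sphinx's epub builder can warn about files it does not recognize, and that warning would fail the build. A minimal conf.py fragment showing just the setting involved (a sketch, not Scrapy's full configuration):

```
# Sphinx settings fragment: keep `fail_on_warning: true` builds green once
# epub/PDF downloads are enabled, by silencing the epub builder's complaint
# about non-document files it encounters.
suppress_warnings = ["epub.unknown_project_files"]
```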
pre-commit__pre-commit-1713
BUG - hooks not working on Windows 10 when the user account name contains non-ASCII characters

When the user account name contains non-ASCII characters such as 'š', so that the Python executable ends up, for example, in C:\Users\john.š\\.cache\pre-commit\repo\py_env-python3.8\Scripts\python.exe, committing to the git repository produces the following message:

An unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment.

PS: the function os.path.isfile() in parse_shebang.normexe() returns False, even though the executable exists there and is a file.
[ { "content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = 
tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> Optional[str]:\n if version == C.DEFAULT: # use virtualenv's default\n return None\n elif _sys_executable_matches(version): # virtualenv defaults to our exe\n return None\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n # always use uncached lookup here in case we replaced an unhealthy env\n _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n venv_cmd = [sys.executable, '-mvirtualenv', envdir]\n python = norm_version(version)\n if python is not None:\n venv_cmd.extend(('-p', python))\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py" } ]
[ { "content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename, encoding='UTF-8') as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n 
info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> Optional[str]:\n if version == C.DEFAULT: # use virtualenv's default\n return None\n elif _sys_executable_matches(version): # virtualenv defaults to our exe\n return None\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n # always use uncached lookup here in case we replaced an unhealthy env\n _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n venv_cmd = [sys.executable, '-mvirtualenv', envdir]\n python = norm_version(version)\n if python is not None:\n venv_cmd.extend(('-p', python))\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py" } ]
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
index 65f521cdc..43b728082 100644
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -36,7 +36,7 @@ def _version_info(exe: str) -> str:
 
 def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
     ret = {}
-    with open(filename) as f:
+    with open(filename, encoding='UTF-8') as f:
         for line in f:
             try:
                 k, v = line.split('=')
diff --git a/tests/languages/python_test.py b/tests/languages/python_test.py
index cfe14834f..90d1036a3 100644
--- a/tests/languages/python_test.py
+++ b/tests/languages/python_test.py
@@ -23,6 +23,13 @@ def test_read_pyvenv_cfg(tmpdir):
     assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
 
 
+def test_read_pyvenv_cfg_non_utf8(tmpdir):
+    pyvenv_cfg = tmpdir.join('pyvenv_cfg')
+    pyvenv_cfg.write_binary('hello = hello john.š\n'.encode())
+    expected = {'hello': 'hello john.š'}
+    assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
+
+
 def test_norm_version_expanduser():
     home = os.path.expanduser('~')
     if os.name == 'nt':  # pragma: nt cover
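The patch above pins the encoding because `open()` without an explicit `encoding` falls back to the Windows locale encoding (for example cp1250 or cp1252), so a pyvenv.cfg line containing 'š' — as in the reporter's C:\Users\john.š home directory — decodes into mojibake and the health check then compares against a garbled value. A small self-contained sketch of that failure mode (the path is hypothetical):

```
# A pyvenv.cfg-style line encoded as UTF-8, the way the test above writes it.
line = "base-executable = C:\\Users\\john.š\\python\\python.exe\n"
data = line.encode("utf-8")

print(data.decode("cp1252"))  # mojibake: 'š' comes back as 'Å¡'
print(data.decode("utf-8"))   # round-trips cleanly, as in the patched _read_pyvenv_cfg
```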
zigpy__zha-device-handlers-342
Update zigpy version to use the new (old module) name for zigpy?

@dmulcahey Ready to update the zigpy version requirement to use the new (old) module name without the -homeassistant suffix?

@Adminiuga changed the zigpy module name back to just "zigpy" (from "zigpy-homeassistant") in the PR https://github.com/zigpy/zigpy/pull/363, see https://github.com/zigpy/zigpy/pull/363/commits/6c9e0e9412a322d4b9558977decf50ca4dfb5ffd

From https://pypi.org/project/zigpy-homeassistant/ back to https://pypi.org/project/zigpy/
[ { "content": "\"\"\"Setup module for ZHAQuirks.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nVERSION = \"0.0.38\"\n\n\ndef readme():\n \"\"\"Print long description.\"\"\"\n with open(\"README.md\") as f:\n return f.read()\n\n\nsetup(\n name=\"zha-quirks\",\n version=VERSION,\n description=\"Library implementing Zigpy quirks for ZHA in Home Assistant\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/dmulcahey/zha-device-handlers\",\n author=\"David F. Mulcahey\",\n author_email=\"[email protected]\",\n license=\"Apache License Version 2.0\",\n keywords=\"zha quirks homeassistant hass\",\n packages=find_packages(exclude=[\"*.tests\"]),\n python_requires=\">=3\",\n install_requires=[\"zigpy-homeassistant>=0.18.1\"],\n tests_require=[\"pytest\"],\n)\n", "path": "setup.py" } ]
[ { "content": "\"\"\"Setup module for ZHAQuirks.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nVERSION = \"0.0.38\"\n\n\ndef readme():\n \"\"\"Print long description.\"\"\"\n with open(\"README.md\") as f:\n return f.read()\n\n\nsetup(\n name=\"zha-quirks\",\n version=VERSION,\n description=\"Library implementing Zigpy quirks for ZHA in Home Assistant\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/dmulcahey/zha-device-handlers\",\n author=\"David F. Mulcahey\",\n author_email=\"[email protected]\",\n license=\"Apache License Version 2.0\",\n keywords=\"zha quirks homeassistant hass\",\n packages=find_packages(exclude=[\"*.tests\"]),\n python_requires=\">=3\",\n install_requires=[\"zigpy>=0.20.0\"],\n tests_require=[\"pytest\"],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py
index 99bd58aeea..1c5e45900e 100644
--- a/setup.py
+++ b/setup.py
@@ -24,6 +24,6 @@ def readme():
     keywords="zha quirks homeassistant hass",
     packages=find_packages(exclude=["*.tests"]),
     python_requires=">=3",
-    install_requires=["zigpy-homeassistant>=0.18.1"],
+    install_requires=["zigpy>=0.20.0"],
     tests_require=["pytest"],
 )
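Since only the required distribution name changes in the diff above, a quick way to confirm which zigpy distribution an environment actually has installed is the standard library's `importlib.metadata` (Python 3.8+); a small sketch using the two names from the issue:

```
from importlib.metadata import PackageNotFoundError, version

for dist in ("zigpy", "zigpy-homeassistant"):
    try:
        print(f"{dist}: {version(dist)}")
    except PackageNotFoundError:
        print(f"{dist}: not installed")
```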
emissary-ingress__emissary-515
'unicode' is an undefined name in Python 3

flake8 testing of https://github.com/datawire/ambassador on Python 3.6.3

2.28s$ time flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics

```
./ambassador/ambassador_diag/envoy.py:223:45: F821 undefined name 'active_cluster_map'
                if True or (cluster_name in active_cluster_map):
                                            ^
./scripts/versioner.py:103:16: F821 undefined name 'unicode'
        return unicode(self)
               ^
2     F821 undefined name 'active_cluster_map'
2
```
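The second finding points at `VersionedBranch.__str__`, which calls the Python 2-only built-in `unicode` (see scripts/versioner.py below). A minimal Python 3-safe sketch of the same intent, using a simplified stand-in for that class (not the project's actual fix):

```
class VersionedBranch:
    def __init__(self, branch_name: str) -> None:
        self.branch_name = branch_name

    def __str__(self) -> str:
        # In Python 3 every str is Unicode, so a single __str__ suffices;
        # `return unicode(self)` raises NameError, which flake8 reports as F821.
        return f"<VersionedBranch {self.branch_name}>"

print(VersionedBranch("master"))  # <VersionedBranch master>
```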
[ { "content": "#!/usr/bin/env python\n\nimport sys\n\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom semantic_version import Version\nfrom git import Repo\n\ndry_run = True\n\nclass VersionedBranch (object):\n \"\"\" A branch for which we're going to wrangle versions based on tags. \"\"\"\n\n def __init__(self, git_repo, git_head):\n \"\"\" Here git_repo and git_head are gitpython objects, not just strings. \"\"\"\n self.log = logging.getLogger(\"VersionedBranch\")\n\n self.repo = git_repo\n self.head = git_head\n self.branch_name = git_head.name\n\n try:\n branch_info = self.repo.git.describe(tags=True, dirty=True, long=True).split('-')\n except Exception as e:\n self.log.warning(\"VersionedBranch: %s could not be described: %s\" % (self.branch_name, e))\n\n if not branch_info:\n self.log.warning(\"VersionedBranch: %s has no description info?\" % self.branch_name)\n\n self.log.debug(\"VersionedBranch: %s gets %s\" % (self.branch_name, branch_info))\n\n try:\n self._version_tag = self.repo.tags[branch_info[0]]\n except Exception as e:\n self.log.warning(\"VersionedBranch: %s has no valid tag %s?\" % (self.branch_name, branch_info[0]))\n\n self.log.debug(\"%s _version_tag: %s\" % (self.branch_name, self._version_tag.name))\n\n self._version = None\n self._versioned_commit = None\n\n self._current_commit = branch_info[2][1:]\n self.log.debug(\"%s _current_commit: %s\" %\n (self.branch_name, self._current_commit))\n\n self._commit_count = int(branch_info[1])\n self.log.debug(\"%s _commit_count: %s\" % (self.branch_name, self._commit_count))\n\n self.is_dirty = True if (len(branch_info) > 3) else False\n self.log.debug(\"%s is_dirty: %s\" % (self.branch_name, self.is_dirty))\n\n @property\n def version_tag(self):\n if self._version_tag is None:\n self.log.warning(\"version_tag: %s got no tag\" % self.branch_name)\n\n return self._version_tag\n \n @property\n def version(self):\n if (self._version is None) and (self.version_tag is not None):\n self._version = Version(self.version_tag.name[1:])\n self.log.debug(\"version: %s => %s\" % (self.branch_name, self._version))\n\n return self._version\n\n @property\n def versioned_commit(self):\n if (self._versioned_commit is None) and (self.version_tag is not None):\n self._versioned_commit = self._version_tag.commit\n self.log.debug(\"versioned_commit: %s => %s @ %s\" % \n (self.branch_name, self.version, self._versioned_commit))\n\n return self._versioned_commit\n\n @property\n def current_commit(self):\n if not self._current_commit:\n self._current_commit = self.head.commit\n self.log.debug(\"current_commit: %s => %s\" %\n (self.branch_name, self._current_commit))\n\n return self._current_commit\n\n @property\n def commit_count(self):\n if not self._commit_count:\n self.log.warning(\"commit_count: %s got no count\" % self.branch_name)\n\n return self._commit_count\n\n def __unicode__(self):\n return (\"<VersionedBranch %s @ %s [%s @ %s]>\" %\n (self.branch_name, str(self.current_commit)[0:8],\n self.version, str(self.versioned_commit)[0:8]))\n\n def __str__(self):\n return unicode(self)\n\n def recent_commits(self, since_tag=None):\n if not since_tag:\n since_tag = self.version_tag.name\n\n for line in self.repo.git.log(\"--reverse\", \"--oneline\", self.branch_name,\n \"--not\", since_tag).split(\"\\n\"):\n self.log.debug(\"line: %s\" % line)\n\n if line:\n commitID, subject = line.split(\" \", 1)\n\n yield commitID, subject\n\n def next_version(self, magic_pre=False, since_tag=None, reduced_zero=True,\n 
only_if_changes=False, pre_release=None, build=None,\n commit_map=None):\n rdelta = ReleaseDelta(self, only_if_changes=only_if_changes, magic_pre=magic_pre,\n since_tag=since_tag, reduced_zero=reduced_zero,\n commit_map=commit_map, pre_release=pre_release, build=build)\n\n return rdelta.next_version\n\nclass VersionDelta(object):\n def __init__(self, scale, xform, tag):\n self.scale = scale\n self.xform = xform\n self.tag = tag\n self.delta = scale\n\n def __cmp__(self, other):\n if self.scale < other.scale:\n return -1\n elif self.scale > other.scale:\n return 1\n else:\n return 0\n\n def __lt__(self, other):\n return self.__cmp__(other) < 0\n\n def __gt__(self, other):\n return self.__cmp__(other) > 0\n\n def __unicode__(self):\n return \"<VersionDelta %s>\" % self.tag\n\n def __str__(self):\n return self.__unicode__()\n\nclass ReleaseDelta(object):\n NONE = VersionDelta( (0,0,0), lambda x: x, \"[NONE]\")\n FIX = VersionDelta( (0,0,1), Version.next_patch, \"[FIX]\")\n MINOR = VersionDelta( (0,1,0), Version.next_minor, \"[MINOR]\")\n MAJOR = VersionDelta( (1,0,0), Version.next_major, \"[MAJOR]\")\n\n \"\"\" how new commits affect project version \"\"\"\n log = logging.getLogger(\"ReleaseDelta\")\n\n def __init__(self, vbr, only_if_changes=False, magic_pre=False, since_tag=None,\n reduced_zero=True, commit_map=None,\n pre_release=None, build=None):\n self.vbr = vbr\n self.is_dirty = vbr.is_dirty\n self.only_if_changes = only_if_changes\n self.magic_pre = magic_pre\n self.since_tag = since_tag\n self.pre_release = pre_release\n self.build = build\n self.commit_map = commit_map\n\n if reduced_zero and (self.vbr.version.major == 0):\n self.log.debug(\"While the project is in %s version, all changes have reduced impact\" % self.vbr.version)\n self.MAJOR.xform = self.MINOR.xform\n self.MAJOR.delta = self.MINOR.delta\n self.MINOR.xform = self.FIX.xform\n self.MINOR.delta = self.FIX.delta\n\n def commits(self):\n # for commit, subject in [\n # ( '123456', '[MINOR]' )\n # ]:\n # yield commit, subject\n\n for commit, subject in self.vbr.recent_commits(self.since_tag):\n if self.commit_map and (commit in self.commit_map):\n subject = self.commit_map[commit]\n logging.debug(\"Override %s with %s\" % (commit, subject))\n\n yield commit, subject\n\n def commit_deltas(self):\n for commitID, subject in self.commits():\n delta = self.FIX\n source = \"by default\"\n\n for commitDelta in [self.MAJOR, self.MINOR, self.FIX]:\n if commitDelta.tag in subject:\n delta = commitDelta\n source = \"from commit message\"\n break\n\n if source == \"by default\":\n if subject.strip().startswith('feat('):\n delta = self.MINOR\n source = \"from feature marker\"\n elif subject.strip().startswith('break:'):\n delta = self.MAJOR\n source = \"from breakage marker\"\n\n self.log.debug(\"commit %s: %s %s\\n-- [%s]\" % (commitID, delta.tag, source, subject))\n\n yield delta, commitID, subject\n\n def version_change(self):\n finalDelta = None\n commits = []\n\n for delta, commitID, subject in self.commit_deltas():\n self.log.debug(\"folding %s: %s\" % (commitID, delta))\n\n commits.append((delta, commitID, subject))\n\n if finalDelta is None:\n self.log.debug(\"%s: initial change %s\" % (commitID, delta))\n finalDelta = delta\n elif delta > finalDelta:\n self.log.debug(\"%s: %s overrides %s\" % (commitID, delta, finalDelta))\n\n finalDelta = delta\n\n if not commits:\n self.log.debug(\"version_change: no commits since %s\" % self.vbr.version)\n return None, None\n else:\n self.log.debug(\"folding %d commit%s into %s: delta %s\" 
% \n (len(commits), \"\" if len(commits) == 1 else \"s\", \n finalDelta, finalDelta.delta))\n\n return finalDelta, commits\n\n @property\n def next_version(self):\n version = self.vbr.version\n self.log.debug(\"version start: %s\" % version)\n\n finalDelta, commits = self.version_change()\n\n # if not finalDelta and self.is_dirty:\n # finalDelta = ReleaseDelta.NONE\n # commits = []\n\n if finalDelta or self.is_dirty:\n if commits:\n self.log.debug(\"final commit list: %s\" % \n \"\\n\".join(map(lambda x: \"%s %s: %s\" % (x[0].tag, x[1], x[2]),\n commits)))\n\n if finalDelta:\n self.log.debug(\"final change: %s %s\" % (finalDelta, finalDelta.delta))\n\n version = finalDelta.xform(version)\n else:\n version = ReleaseDelta.NONE.xform(version)\n\n self.log.debug(\"version: %s\" % version)\n\n if finalDelta and self.magic_pre:\n version.prerelease = [ 'b%d' % self.vbr.commit_count, self.vbr.current_commit ]\n\n # pre = self.vbr.version.prerelease\n\n # self.log.debug(\"magic check: '%s'\" % str(pre))\n\n # if pre:\n # pre = pre[0]\n\n # if pre and pre.startswith('b'):\n # if finalDelta > self.FIX:\n # pre = \"b1\"\n # else:\n # pre = \"b\" + str(int(pre[1:]) + 1)\n\n # self.log.debug(\"magic prerelease: %s\" % str(pre))\n # version.prerelease = [pre]\n # else:\n # version.prerelease = [ \"b\" + ]\n elif self.pre_release:\n version.prerelease = [ self.pre_release ]\n\n if self.is_dirty:\n self.log.debug(\"dirty build\")\n\n if not version.prerelease:\n version.prerelease = []\n\n if 'DIRTY' not in version.prerelease:\n version.prerelease = list(version.prerelease)\n version.prerelease.append('DIRTY')\n\n self.log.debug(\"final prerelease: %s\" % str(version.prerelease))\n\n if self.build:\n version.build = [ self.build ]\n\n self.log.debug(\"final build: %s\" % str(version.build))\n\n self.log.debug(\"version has to change from %s to %s\" %\n (self.vbr.version, version))\n\n return version\n elif self.only_if_changes:\n return None\n else:\n # Just return the old version.\n return version\n\nclass VersionedRepo (object):\n \"\"\" Representation of a git repo that follows our versioning rules \"\"\"\n\n def __init__(self, repo_root):\n self.log = logging.getLogger(\"VersionedRepo\")\n\n self.repo = Repo(repo_root, search_parent_directories=True)\n self.is_dirty = self.repo.is_dirty()\n self.branches = {}\n\n def get_branch(self, branch_name):\n # Grab a branch of this repo\n\n key = branch_name if branch_name else '*current*'\n source = 'cache'\n vbr = None\n\n if key in self.branches:\n vbr = self.branches[key]\n\n if not vbr:\n source = 'Git'\n\n head = None\n\n if branch_name:\n print(self.repo.heads)\n\n head = self.repo.heads[branch_name]\n else:\n # head = self.repo.active_branch\n head = self.repo.head \n\n if not head:\n self.log.warning(\"get_branch: no branch %s\" % branch_name)\n\n vbr = VersionedBranch(self.repo, head)\n\n self.branches[key] = vbr\n\n self.log.debug(\"get_branch: got %s from %s\" % (key, source))\n\n return vbr\n\n def tag_version(self, version, commit):\n tag_name = str(version)\n\n new_tag = self.repo.create_tag(tag_name, commit)\n\n return new_tag\n\nif __name__ == '__main__':\n from docopt import docopt\n\n __doc__ = \"\"\"versioner.py\n\n Manipulate version tags\n\n Usage: \n versioner.py [-n] [--verbose] [options]\n\n Options:\n --bump figure out a new version number\n --branch=<branchname> set which branch to work on\n --magic-pre do magic autoincrementing prerelease numbers\n --pre=<pre-release-tag> explicitly set the prerelease number\n --build=<build-tag> 
explicitly set the build number\n --since=<since-tag> override the tag of the last release\n --map=<mappings> override what kind of change given commits are (see below)\n --only-if-changes don't build if there are no changes since last tag\n --scout-json=<output> write an app.json for Scout\n\n Without --bump, versioner.py will simply output the current version number.\n\n Mappings are commit=kind[,commit=kind[,...]] where commit is a unique SHA prefix\n and kind is FIX, MINOR, or MAJOR.\n \"\"\"\n\n args = docopt(__doc__, version=\"versioner {0}\".format(\"0.1.0\"))\n\n dryrun = args[\"-n\"]\n\n level = logging.INFO\n\n if args[\"--verbose\"]:\n level = logging.DEBUG\n\n logging.basicConfig(level=level)\n\n vr = VersionedRepo(os.getcwd())\n vbr = vr.get_branch(args.get('--branch', None))\n\n if not args['--bump']:\n print(vbr.version)\n sys.exit(0)\n\n commit_map = {}\n\n if args[\"--map\"]:\n shown_format_error = False\n\n for element in args[\"--map\"].split(\",\"):\n if '=' in element:\n commit, kind = element.split('=')\n\n commit_map[commit] = \"[%s]\" % kind\n logging.debug(\"Forcing %s to %s\" % (commit, commit_map[commit]))\n elif not shown_format_error:\n logging.error(\"Map elements must be commit=kind\")\n shown_format_error = True\n\n next_version = vbr.next_version(magic_pre=args.get('--magic-pre', False),\n pre_release=args.get('--pre', None),\n build=args.get('--build', None),\n since_tag=args.get('--since', None),\n only_if_changes=args.get('--only-if-changes', False),\n reduced_zero=False,\n commit_map=commit_map)\n\n if args['--scout-json']:\n app_json = {\n \"application\": \"ambassador\",\n \"latest_version\": str(next_version),\n \"notices\": []\n }\n\n json.dump(app_json, open(args['--scout-json'], \"w\"), indent=4, sort_keys=True)\n\n if next_version:\n print(next_version)\n else:\n sys.stderr.write(\"no changes since %s\\n\" % vbr.version)\n sys.exit(1)\n", "path": "scripts/versioner.py" } ]
[ { "content": "#!/usr/bin/env python\n\nimport sys\n\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom semantic_version import Version\nfrom git import Repo\n\ndry_run = True\n\nclass VersionedBranch (object):\n \"\"\" A branch for which we're going to wrangle versions based on tags. \"\"\"\n\n def __init__(self, git_repo, git_head):\n \"\"\" Here git_repo and git_head are gitpython objects, not just strings. \"\"\"\n self.log = logging.getLogger(\"VersionedBranch\")\n\n self.repo = git_repo\n self.head = git_head\n self.branch_name = git_head.name\n\n try:\n branch_info = self.repo.git.describe(tags=True, dirty=True, long=True).split('-')\n except Exception as e:\n self.log.warning(\"VersionedBranch: %s could not be described: %s\" % (self.branch_name, e))\n\n if not branch_info:\n self.log.warning(\"VersionedBranch: %s has no description info?\" % self.branch_name)\n\n self.log.debug(\"VersionedBranch: %s gets %s\" % (self.branch_name, branch_info))\n\n try:\n self._version_tag = self.repo.tags[branch_info[0]]\n except Exception as e:\n self.log.warning(\"VersionedBranch: %s has no valid tag %s?\" % (self.branch_name, branch_info[0]))\n\n self.log.debug(\"%s _version_tag: %s\" % (self.branch_name, self._version_tag.name))\n\n self._version = None\n self._versioned_commit = None\n\n self._current_commit = branch_info[2][1:]\n self.log.debug(\"%s _current_commit: %s\" %\n (self.branch_name, self._current_commit))\n\n self._commit_count = int(branch_info[1])\n self.log.debug(\"%s _commit_count: %s\" % (self.branch_name, self._commit_count))\n\n self.is_dirty = True if (len(branch_info) > 3) else False\n self.log.debug(\"%s is_dirty: %s\" % (self.branch_name, self.is_dirty))\n\n @property\n def version_tag(self):\n if self._version_tag is None:\n self.log.warning(\"version_tag: %s got no tag\" % self.branch_name)\n\n return self._version_tag\n \n @property\n def version(self):\n if (self._version is None) and (self.version_tag is not None):\n self._version = Version(self.version_tag.name[1:])\n self.log.debug(\"version: %s => %s\" % (self.branch_name, self._version))\n\n return self._version\n\n @property\n def versioned_commit(self):\n if (self._versioned_commit is None) and (self.version_tag is not None):\n self._versioned_commit = self._version_tag.commit\n self.log.debug(\"versioned_commit: %s => %s @ %s\" % \n (self.branch_name, self.version, self._versioned_commit))\n\n return self._versioned_commit\n\n @property\n def current_commit(self):\n if not self._current_commit:\n self._current_commit = self.head.commit\n self.log.debug(\"current_commit: %s => %s\" %\n (self.branch_name, self._current_commit))\n\n return self._current_commit\n\n @property\n def commit_count(self):\n if not self._commit_count:\n self.log.warning(\"commit_count: %s got no count\" % self.branch_name)\n\n return self._commit_count\n\n def __unicode__(self):\n return (\"<VersionedBranch %s @ %s [%s @ %s]>\" %\n (self.branch_name, str(self.current_commit)[0:8],\n self.version, str(self.versioned_commit)[0:8]))\n\n def __str__(self):\n return str(self)\n\n def recent_commits(self, since_tag=None):\n if not since_tag:\n since_tag = self.version_tag.name\n\n for line in self.repo.git.log(\"--reverse\", \"--oneline\", self.branch_name,\n \"--not\", since_tag).split(\"\\n\"):\n self.log.debug(\"line: %s\" % line)\n\n if line:\n commitID, subject = line.split(\" \", 1)\n\n yield commitID, subject\n\n def next_version(self, magic_pre=False, since_tag=None, reduced_zero=True,\n 
only_if_changes=False, pre_release=None, build=None,\n commit_map=None):\n rdelta = ReleaseDelta(self, only_if_changes=only_if_changes, magic_pre=magic_pre,\n since_tag=since_tag, reduced_zero=reduced_zero,\n commit_map=commit_map, pre_release=pre_release, build=build)\n\n return rdelta.next_version\n\nclass VersionDelta(object):\n def __init__(self, scale, xform, tag):\n self.scale = scale\n self.xform = xform\n self.tag = tag\n self.delta = scale\n\n def __cmp__(self, other):\n if self.scale < other.scale:\n return -1\n elif self.scale > other.scale:\n return 1\n else:\n return 0\n\n def __lt__(self, other):\n return self.__cmp__(other) < 0\n\n def __gt__(self, other):\n return self.__cmp__(other) > 0\n\n def __unicode__(self):\n return \"<VersionDelta %s>\" % self.tag\n\n def __str__(self):\n return self.__unicode__()\n\nclass ReleaseDelta(object):\n NONE = VersionDelta( (0,0,0), lambda x: x, \"[NONE]\")\n FIX = VersionDelta( (0,0,1), Version.next_patch, \"[FIX]\")\n MINOR = VersionDelta( (0,1,0), Version.next_minor, \"[MINOR]\")\n MAJOR = VersionDelta( (1,0,0), Version.next_major, \"[MAJOR]\")\n\n \"\"\" how new commits affect project version \"\"\"\n log = logging.getLogger(\"ReleaseDelta\")\n\n def __init__(self, vbr, only_if_changes=False, magic_pre=False, since_tag=None,\n reduced_zero=True, commit_map=None,\n pre_release=None, build=None):\n self.vbr = vbr\n self.is_dirty = vbr.is_dirty\n self.only_if_changes = only_if_changes\n self.magic_pre = magic_pre\n self.since_tag = since_tag\n self.pre_release = pre_release\n self.build = build\n self.commit_map = commit_map\n\n if reduced_zero and (self.vbr.version.major == 0):\n self.log.debug(\"While the project is in %s version, all changes have reduced impact\" % self.vbr.version)\n self.MAJOR.xform = self.MINOR.xform\n self.MAJOR.delta = self.MINOR.delta\n self.MINOR.xform = self.FIX.xform\n self.MINOR.delta = self.FIX.delta\n\n def commits(self):\n # for commit, subject in [\n # ( '123456', '[MINOR]' )\n # ]:\n # yield commit, subject\n\n for commit, subject in self.vbr.recent_commits(self.since_tag):\n if self.commit_map and (commit in self.commit_map):\n subject = self.commit_map[commit]\n logging.debug(\"Override %s with %s\" % (commit, subject))\n\n yield commit, subject\n\n def commit_deltas(self):\n for commitID, subject in self.commits():\n delta = self.FIX\n source = \"by default\"\n\n for commitDelta in [self.MAJOR, self.MINOR, self.FIX]:\n if commitDelta.tag in subject:\n delta = commitDelta\n source = \"from commit message\"\n break\n\n if source == \"by default\":\n if subject.strip().startswith('feat('):\n delta = self.MINOR\n source = \"from feature marker\"\n elif subject.strip().startswith('break:'):\n delta = self.MAJOR\n source = \"from breakage marker\"\n\n self.log.debug(\"commit %s: %s %s\\n-- [%s]\" % (commitID, delta.tag, source, subject))\n\n yield delta, commitID, subject\n\n def version_change(self):\n finalDelta = None\n commits = []\n\n for delta, commitID, subject in self.commit_deltas():\n self.log.debug(\"folding %s: %s\" % (commitID, delta))\n\n commits.append((delta, commitID, subject))\n\n if finalDelta is None:\n self.log.debug(\"%s: initial change %s\" % (commitID, delta))\n finalDelta = delta\n elif delta > finalDelta:\n self.log.debug(\"%s: %s overrides %s\" % (commitID, delta, finalDelta))\n\n finalDelta = delta\n\n if not commits:\n self.log.debug(\"version_change: no commits since %s\" % self.vbr.version)\n return None, None\n else:\n self.log.debug(\"folding %d commit%s into %s: delta %s\" 
% \n (len(commits), \"\" if len(commits) == 1 else \"s\", \n finalDelta, finalDelta.delta))\n\n return finalDelta, commits\n\n @property\n def next_version(self):\n version = self.vbr.version\n self.log.debug(\"version start: %s\" % version)\n\n finalDelta, commits = self.version_change()\n\n # if not finalDelta and self.is_dirty:\n # finalDelta = ReleaseDelta.NONE\n # commits = []\n\n if finalDelta or self.is_dirty:\n if commits:\n self.log.debug(\"final commit list: %s\" % \n \"\\n\".join(map(lambda x: \"%s %s: %s\" % (x[0].tag, x[1], x[2]),\n commits)))\n\n if finalDelta:\n self.log.debug(\"final change: %s %s\" % (finalDelta, finalDelta.delta))\n\n version = finalDelta.xform(version)\n else:\n version = ReleaseDelta.NONE.xform(version)\n\n self.log.debug(\"version: %s\" % version)\n\n if finalDelta and self.magic_pre:\n version.prerelease = [ 'b%d' % self.vbr.commit_count, self.vbr.current_commit ]\n\n # pre = self.vbr.version.prerelease\n\n # self.log.debug(\"magic check: '%s'\" % str(pre))\n\n # if pre:\n # pre = pre[0]\n\n # if pre and pre.startswith('b'):\n # if finalDelta > self.FIX:\n # pre = \"b1\"\n # else:\n # pre = \"b\" + str(int(pre[1:]) + 1)\n\n # self.log.debug(\"magic prerelease: %s\" % str(pre))\n # version.prerelease = [pre]\n # else:\n # version.prerelease = [ \"b\" + ]\n elif self.pre_release:\n version.prerelease = [ self.pre_release ]\n\n if self.is_dirty:\n self.log.debug(\"dirty build\")\n\n if not version.prerelease:\n version.prerelease = []\n\n if 'DIRTY' not in version.prerelease:\n version.prerelease = list(version.prerelease)\n version.prerelease.append('DIRTY')\n\n self.log.debug(\"final prerelease: %s\" % str(version.prerelease))\n\n if self.build:\n version.build = [ self.build ]\n\n self.log.debug(\"final build: %s\" % str(version.build))\n\n self.log.debug(\"version has to change from %s to %s\" %\n (self.vbr.version, version))\n\n return version\n elif self.only_if_changes:\n return None\n else:\n # Just return the old version.\n return version\n\nclass VersionedRepo (object):\n \"\"\" Representation of a git repo that follows our versioning rules \"\"\"\n\n def __init__(self, repo_root):\n self.log = logging.getLogger(\"VersionedRepo\")\n\n self.repo = Repo(repo_root, search_parent_directories=True)\n self.is_dirty = self.repo.is_dirty()\n self.branches = {}\n\n def get_branch(self, branch_name):\n # Grab a branch of this repo\n\n key = branch_name if branch_name else '*current*'\n source = 'cache'\n vbr = None\n\n if key in self.branches:\n vbr = self.branches[key]\n\n if not vbr:\n source = 'Git'\n\n head = None\n\n if branch_name:\n print(self.repo.heads)\n\n head = self.repo.heads[branch_name]\n else:\n # head = self.repo.active_branch\n head = self.repo.head \n\n if not head:\n self.log.warning(\"get_branch: no branch %s\" % branch_name)\n\n vbr = VersionedBranch(self.repo, head)\n\n self.branches[key] = vbr\n\n self.log.debug(\"get_branch: got %s from %s\" % (key, source))\n\n return vbr\n\n def tag_version(self, version, commit):\n tag_name = str(version)\n\n new_tag = self.repo.create_tag(tag_name, commit)\n\n return new_tag\n\nif __name__ == '__main__':\n from docopt import docopt\n\n __doc__ = \"\"\"versioner.py\n\n Manipulate version tags\n\n Usage: \n versioner.py [-n] [--verbose] [options]\n\n Options:\n --bump figure out a new version number\n --branch=<branchname> set which branch to work on\n --magic-pre do magic autoincrementing prerelease numbers\n --pre=<pre-release-tag> explicitly set the prerelease number\n --build=<build-tag> 
explicitly set the build number\n --since=<since-tag> override the tag of the last release\n --map=<mappings> override what kind of change given commits are (see below)\n --only-if-changes don't build if there are no changes since last tag\n --scout-json=<output> write an app.json for Scout\n\n Without --bump, versioner.py will simply output the current version number.\n\n Mappings are commit=kind[,commit=kind[,...]] where commit is a unique SHA prefix\n and kind is FIX, MINOR, or MAJOR.\n \"\"\"\n\n args = docopt(__doc__, version=\"versioner {0}\".format(\"0.1.0\"))\n\n dryrun = args[\"-n\"]\n\n level = logging.INFO\n\n if args[\"--verbose\"]:\n level = logging.DEBUG\n\n logging.basicConfig(level=level)\n\n vr = VersionedRepo(os.getcwd())\n vbr = vr.get_branch(args.get('--branch', None))\n\n if not args['--bump']:\n print(vbr.version)\n sys.exit(0)\n\n commit_map = {}\n\n if args[\"--map\"]:\n shown_format_error = False\n\n for element in args[\"--map\"].split(\",\"):\n if '=' in element:\n commit, kind = element.split('=')\n\n commit_map[commit] = \"[%s]\" % kind\n logging.debug(\"Forcing %s to %s\" % (commit, commit_map[commit]))\n elif not shown_format_error:\n logging.error(\"Map elements must be commit=kind\")\n shown_format_error = True\n\n next_version = vbr.next_version(magic_pre=args.get('--magic-pre', False),\n pre_release=args.get('--pre', None),\n build=args.get('--build', None),\n since_tag=args.get('--since', None),\n only_if_changes=args.get('--only-if-changes', False),\n reduced_zero=False,\n commit_map=commit_map)\n\n if args['--scout-json']:\n app_json = {\n \"application\": \"ambassador\",\n \"latest_version\": str(next_version),\n \"notices\": []\n }\n\n json.dump(app_json, open(args['--scout-json'], \"w\"), indent=4, sort_keys=True)\n\n if next_version:\n print(next_version)\n else:\n sys.stderr.write(\"no changes since %s\\n\" % vbr.version)\n sys.exit(1)\n", "path": "scripts/versioner.py" } ]
diff --git a/scripts/versioner.py b/scripts/versioner.py index 52c80c0091..bc796e10e3 100755 --- a/scripts/versioner.py +++ b/scripts/versioner.py @@ -100,7 +100,7 @@ def __unicode__(self): self.version, str(self.versioned_commit)[0:8])) def __str__(self): - return unicode(self) + return str(self) def recent_commits(self, since_tag=None): if not since_tag:
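For context on the change above: the `unicode` builtin does not exist in Python 3, and a `__str__` that simply returns `str(self)` will call itself, so the usual pattern is to format directly or to delegate to `__unicode__` (as the `VersionDelta` class in this file already does). A generic, hedged sketch of that pattern, not code from this repository:

```python
class VersionedThing:
    """Illustrative only; not the VersionedBranch class from scripts/versioner.py."""

    def __init__(self, name: str) -> None:
        self.name = name

    def __unicode__(self) -> str:
        return "<VersionedThing %s>" % self.name

    def __str__(self) -> str:
        # Delegate to the existing formatter; returning str(self) here would recurse.
        return self.__unicode__()
```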
ManageIQ__integration_tests-8533
is_displayed for catalog all page is not working as expected
Currently, `is_displayed` of the catalog All page view returns True even if the view is on the Add Catalog page.
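The diff at the end of this record addresses this by also checking the page title, since the accordion tree still reports "All Catalogs" as selected while the Add Catalog form is open. A minimal sketch of the tightened property (it mirrors the fixed file below; the `ServicesCatalogView` base class and widgets come from the repository, so treat this as an illustration rather than standalone code):

```python
from widgetastic.widget import Text

class CatalogsView(ServicesCatalogView):  # base class defined in cfme.services.catalogs
    title = Text("#explorer_title_text")

    @property
    def is_displayed(self):
        # The tree selection alone is not discriminating enough: it still shows
        # "All Catalogs" on the Add Catalog page, so the title text is checked too.
        return (
            self.in_explorer
            and self.catalogs.is_opened
            and self.title.text == "All Catalogs"
            and self.catalogs.tree.currently_selected == ["All Catalogs"]
        )
```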
[ { "content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfly import Input\n\nfrom . import ServicesCatalogView\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or self.name))\n\n @property\n def exists(self):\n try:\n 
navigate_to(self, 'Details')\n return True\n except (NameError, CandidateNotFound):\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py" } ]
[ { "content": "import attr\nfrom navmazing import NavigateToAttribute\nfrom navmazing import NavigateToSibling\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_patternfly import Button\nfrom widgetastic_patternfly import CandidateNotFound\nfrom widgetastic_patternfly import Input\n\nfrom . import ServicesCatalogView\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection\nfrom cfme.modeling.base import BaseEntity\nfrom cfme.utils.appliance.implementations.ui import CFMENavigateStep\nfrom cfme.utils.appliance.implementations.ui import navigate_to\nfrom cfme.utils.appliance.implementations.ui import navigator\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\nfrom widgetastic_manageiq import MultiBoxSelect\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.title.text == \"All Catalogs\" and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or 
self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, 'Details')\n return True\n except (NameError, CandidateNotFound):\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self, *args, **kwargs):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py" } ]
diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py index 6367d52997..f248e20199 100644 --- a/cfme/services/catalogs/catalog.py +++ b/cfme/services/catalogs/catalog.py @@ -49,6 +49,7 @@ def is_displayed(self): return ( self.in_explorer and self.catalogs.is_opened and + self.title.text == "All Catalogs" and self.catalogs.tree.currently_selected == ["All Catalogs"])
flairNLP__flair-2170
Close log_handler after training is complete.
**Describe the bug**
We remove the log_handler [here](https://github.com/flairNLP/flair/blob/master/flair/trainers/trainer.py#L633) but never close it, which leads to `ResourceWarning: unclosed file <_io.BufferedReader name='training.log`. As a result, training.log cannot be accessed programmatically (for example, uploading the file to S3 with botocore fails).
**To Reproduce**
Training any model leads to this warning.
**Expected behavior**
The `log_handler` should be closed before training exits.
**Environment (please complete the following information):**
 - macOS 10.15.6
 - Version: flair-0.8
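A minimal sketch of the requested behaviour using only the standard `logging` API; the `add_file_handler` helper below is a simplified stand-in for `flair.training_utils.add_file_handler`, and closing in a `finally` block is one way to address the warning rather than necessarily the exact change made in the fix:

```python
import logging
from pathlib import Path

log = logging.getLogger("flair")

def add_file_handler(logger: logging.Logger, path: Path) -> logging.FileHandler:
    # Simplified stand-in for flair.training_utils.add_file_handler.
    handler = logging.FileHandler(path, mode="a")
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    return handler

def train(base_path: Path) -> None:
    base_path.mkdir(parents=True, exist_ok=True)
    log_handler = add_file_handler(log, base_path / "training.log")
    try:
        log.info("training ...")  # stands in for the real training loop
    finally:
        # Close the underlying file before detaching the handler so that
        # training.log can be read or uploaded afterwards without a ResourceWarning.
        log_handler.close()
        log.removeHandler(log_handler)
```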
[ { "content": "import copy\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\nimport time\nimport datetime\nimport sys\nimport inspect\n\nimport torch\nfrom torch.optim.sgd import SGD\nfrom torch.utils.data.dataset import ConcatDataset\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nimport flair\nimport flair.nn\nfrom flair.data import MultiCorpus, Corpus\nfrom flair.datasets import DataLoader\nfrom flair.optim import ExpAnnealLR\nfrom flair.training_utils import (\n init_output_file,\n WeightExtractor,\n log_line,\n add_file_handler,\n Result,\n store_embeddings,\n AnnealOnPlateau,\n)\nfrom torch.optim.lr_scheduler import OneCycleLR\nfrom flair.models import SequenceTagger\nimport random\n\nlog = logging.getLogger(\"flair\")\n\n\nclass ModelTrainer:\n def __init__(\n self,\n model: flair.nn.Model,\n corpus: Corpus,\n optimizer: torch.optim.Optimizer = SGD,\n epoch: int = 0,\n use_tensorboard: bool = False,\n ):\n \"\"\"\n Initialize a model trainer\n :param model: The model that you want to train. The model should inherit from flair.nn.Model\n :param corpus: The dataset used to train the model, should be of type Corpus\n :param optimizer: The optimizer to use (typically SGD or Adam)\n :param epoch: The starting epoch (normally 0 but could be higher if you continue training model)\n :param use_tensorboard: If True, writes out tensorboard information\n \"\"\"\n self.model: flair.nn.Model = model\n self.corpus: Corpus = corpus\n self.optimizer: torch.optim.Optimizer = optimizer\n self.epoch: int = epoch\n self.use_tensorboard: bool = use_tensorboard\n\n def train(\n self,\n base_path: Union[Path, str],\n learning_rate: float = 0.1,\n mini_batch_size: int = 32,\n mini_batch_chunk_size: int = None,\n max_epochs: int = 100,\n scheduler=AnnealOnPlateau,\n cycle_momentum: bool = False,\n anneal_factor: float = 0.5,\n patience: int = 3,\n initial_extra_patience=0,\n min_learning_rate: float = 0.0001,\n train_with_dev: bool = False,\n train_with_test: bool = False,\n monitor_train: bool = False,\n monitor_test: bool = False,\n embeddings_storage_mode: str = \"cpu\",\n checkpoint: bool = False,\n save_final_model: bool = True,\n anneal_with_restarts: bool = False,\n anneal_with_prestarts: bool = False,\n batch_growth_annealing: bool = False,\n shuffle: bool = True,\n param_selection_mode: bool = False,\n write_weights: bool = False,\n num_workers: int = 6,\n sampler=None,\n use_amp: bool = False,\n amp_opt_level: str = \"O1\",\n eval_on_train_fraction=0.0,\n eval_on_train_shuffle=False,\n save_model_each_k_epochs: int = 0,\n **kwargs,\n ) -> dict:\n \"\"\"\n Trains any class that implements the flair.nn.Model interface.\n :param base_path: Main path to which all output during training is logged and models are saved\n :param learning_rate: Initial learning rate (or max, if scheduler is OneCycleLR)\n :param mini_batch_size: Size of mini-batches during training\n :param mini_batch_chunk_size: If mini-batches are larger than this number, they get broken down into chunks of this size for processing purposes\n :param max_epochs: Maximum number of epochs to train. 
Terminates training if this number is surpassed.\n :param scheduler: The learning rate scheduler to use\n :param cycle_momentum: If scheduler is OneCycleLR, whether the scheduler should cycle also the momentum\n :param anneal_factor: The factor by which the learning rate is annealed\n :param patience: Patience is the number of epochs with no improvement the Trainer waits\n until annealing the learning rate\n :param min_learning_rate: If the learning rate falls below this threshold, training terminates\n :param train_with_dev: If True, training is performed using both train+dev data\n :param monitor_train: If True, training data is evaluated at end of each epoch\n :param monitor_test: If True, test data is evaluated at end of each epoch\n :param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),\n 'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)\n :param checkpoint: If True, a full checkpoint is saved at end of each epoch\n :param save_final_model: If True, final model is saved\n :param anneal_with_restarts: If True, the last best model is restored when annealing the learning rate\n :param shuffle: If True, data is shuffled during training\n :param param_selection_mode: If True, testing is performed against dev data. Use this mode when doing\n parameter selection.\n :param num_workers: Number of workers in your data loader.\n :param sampler: You can pass a data sampler here for special sampling of data.\n :param eval_on_train_fraction: the fraction of train data to do the evaluation on,\n if 0. the evaluation is not performed on fraction of training data,\n if 'dev' the size is determined from dev set size\n :param eval_on_train_shuffle: if True the train data fraction is determined on the start of training\n and kept fixed during training, otherwise it's sampled at beginning of each epoch\n :param save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will\n be saved each 5 epochs. Default is 0 which means no model saving.\n :param save_model_epoch_step: Each save_model_epoch_step'th epoch the thus far trained model will be saved\n :param kwargs: Other arguments for the Optimizer\n :return:\n \"\"\"\n\n if self.use_tensorboard:\n try:\n from torch.utils.tensorboard import SummaryWriter\n\n writer = SummaryWriter()\n except:\n log_line(log)\n log.warning(\n \"ATTENTION! PyTorch >= 1.1.0 and pillow are required for TensorBoard support!\"\n )\n log_line(log)\n self.use_tensorboard = False\n pass\n\n if use_amp:\n if sys.version_info < (3, 0):\n raise RuntimeError(\"Apex currently only supports Python 3. Aborting.\")\n if amp is None:\n raise RuntimeError(\n \"Failed to import apex. 
Please install apex from https://www.github.com/nvidia/apex \"\n \"to enable mixed-precision training.\"\n )\n\n if mini_batch_chunk_size is None:\n mini_batch_chunk_size = mini_batch_size\n if learning_rate < min_learning_rate:\n min_learning_rate = learning_rate / 10\n\n initial_learning_rate = learning_rate\n\n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n\n log_handler = add_file_handler(log, base_path / \"training.log\")\n\n log_line(log)\n log.info(f'Model: \"{self.model}\"')\n log_line(log)\n log.info(f'Corpus: \"{self.corpus}\"')\n log_line(log)\n log.info(\"Parameters:\")\n log.info(f' - learning_rate: \"{learning_rate}\"')\n log.info(f' - mini_batch_size: \"{mini_batch_size}\"')\n log.info(f' - patience: \"{patience}\"')\n log.info(f' - anneal_factor: \"{anneal_factor}\"')\n log.info(f' - max_epochs: \"{max_epochs}\"')\n log.info(f' - shuffle: \"{shuffle}\"')\n log.info(f' - train_with_dev: \"{train_with_dev}\"')\n log.info(f' - batch_growth_annealing: \"{batch_growth_annealing}\"')\n log_line(log)\n log.info(f'Model training base path: \"{base_path}\"')\n log_line(log)\n log.info(f\"Device: {flair.device}\")\n log_line(log)\n log.info(f\"Embeddings storage mode: {embeddings_storage_mode}\")\n if isinstance(self.model, SequenceTagger) and self.model.weight_dict and self.model.use_crf:\n log_line(log)\n log.warning(f'WARNING: Specified class weights will not take effect when using CRF')\n\n # determine what splits (train, dev, test) to evaluate and log\n log_train = True if monitor_train else False\n log_test = (\n True\n if (not param_selection_mode and self.corpus.test and monitor_test)\n else False\n )\n log_dev = False if train_with_dev or not self.corpus.dev else True\n log_train_part = (\n True\n if (eval_on_train_fraction == \"dev\" or eval_on_train_fraction > 0.0)\n else False\n )\n\n if log_train_part:\n train_part_size = (\n len(self.corpus.dev)\n if eval_on_train_fraction == \"dev\"\n else int(len(self.corpus.train) * eval_on_train_fraction)\n )\n assert train_part_size > 0\n if not eval_on_train_shuffle:\n train_part_indices = list(range(train_part_size))\n train_part = torch.utils.data.dataset.Subset(\n self.corpus.train, train_part_indices\n )\n\n # prepare loss logging file and set up header\n loss_txt = init_output_file(base_path, \"loss.tsv\")\n\n weight_extractor = WeightExtractor(base_path)\n\n optimizer: torch.optim.Optimizer = self.optimizer(\n self.model.parameters(), lr=learning_rate, **kwargs\n )\n\n if use_amp:\n self.model, optimizer = amp.initialize(\n self.model, optimizer, opt_level=amp_opt_level\n )\n\n # minimize training loss if training with dev data, else maximize dev score\n anneal_mode = \"min\" if train_with_dev else \"max\"\n\n if scheduler == OneCycleLR:\n dataset_size = len(self.corpus.train)\n if train_with_dev:\n dataset_size += len(self.corpus.dev)\n lr_scheduler = OneCycleLR(optimizer,\n max_lr=learning_rate,\n steps_per_epoch=dataset_size // mini_batch_size + 1,\n epochs=max_epochs - self.epoch,\n # if we load a checkpoint, we have already trained for self.epoch\n pct_start=0.0,\n cycle_momentum=cycle_momentum)\n else:\n lr_scheduler = scheduler(\n optimizer,\n factor=anneal_factor,\n patience=patience,\n initial_extra_patience=initial_extra_patience,\n mode=anneal_mode,\n verbose=True,\n )\n\n if (isinstance(lr_scheduler, OneCycleLR) and batch_growth_annealing):\n raise ValueError(\"Batch growth with OneCycle policy is not implemented.\")\n\n train_data = self.corpus.train\n\n # if training also 
uses dev/train data, include in training set\n if train_with_dev or train_with_test:\n\n parts = [self.corpus.train]\n if train_with_dev: parts.append(self.corpus.dev)\n if train_with_test: parts.append(self.corpus.test)\n\n train_data = ConcatDataset(parts)\n\n # initialize sampler if provided\n if sampler is not None:\n # init with default values if only class is provided\n if inspect.isclass(sampler):\n sampler = sampler()\n # set dataset to sample from\n sampler.set_dataset(train_data)\n shuffle = False\n\n dev_score_history = []\n dev_loss_history = []\n train_loss_history = []\n\n micro_batch_size = mini_batch_chunk_size\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n previous_learning_rate = learning_rate\n momentum = 0\n for group in optimizer.param_groups:\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n\n for self.epoch in range(self.epoch + 1, max_epochs + 1):\n log_line(log)\n\n if anneal_with_prestarts:\n last_epoch_model_state_dict = copy.deepcopy(self.model.state_dict())\n\n if eval_on_train_shuffle:\n train_part_indices = list(range(self.corpus.train))\n random.shuffle(train_part_indices)\n train_part_indices = train_part_indices[:train_part_size]\n train_part = torch.utils.data.dataset.Subset(\n self.corpus.train, train_part_indices\n )\n\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n\n if learning_rate != previous_learning_rate and batch_growth_annealing:\n mini_batch_size *= 2\n\n # reload last best model if annealing with restarts is enabled\n if (\n (anneal_with_restarts or anneal_with_prestarts)\n and learning_rate != previous_learning_rate\n and (base_path / \"best-model.pt\").exists()\n ):\n if anneal_with_restarts:\n log.info(\"resetting to best model\")\n self.model.load_state_dict(\n self.model.load(base_path / \"best-model.pt\").state_dict()\n )\n if anneal_with_prestarts:\n log.info(\"resetting to pre-best model\")\n self.model.load_state_dict(\n self.model.load(base_path / \"pre-best-model.pt\").state_dict()\n )\n\n previous_learning_rate = learning_rate\n\n # stop training if learning rate becomes too small\n if (not isinstance(lr_scheduler, OneCycleLR)) and learning_rate < min_learning_rate:\n log_line(log)\n log.info(\"learning rate too small - quitting training!\")\n log_line(log)\n break\n\n batch_loader = DataLoader(\n train_data,\n batch_size=mini_batch_size,\n shuffle=shuffle if self.epoch > 1 else False, # never shuffle the first epoch\n num_workers=num_workers,\n sampler=sampler,\n )\n\n self.model.train()\n\n train_loss: float = 0\n\n seen_batches = 0\n total_number_of_batches = len(batch_loader)\n\n modulo = max(1, int(total_number_of_batches / 10))\n\n # process mini-batches\n batch_time = 0\n for batch_no, batch in enumerate(batch_loader):\n\n start_time = time.time()\n\n # zero the gradients on the model and optimizer\n self.model.zero_grad()\n optimizer.zero_grad()\n\n # if necessary, make batch_steps\n batch_steps = [batch]\n if len(batch) > micro_batch_size:\n batch_steps = [\n batch[x: x + micro_batch_size]\n for x in range(0, len(batch), micro_batch_size)\n ]\n\n # forward and backward for batch\n for batch_step in batch_steps:\n\n # forward pass\n loss = self.model.forward_loss(batch_step)\n\n # Backward\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # do the optimizer step\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n\n # do the 
scheduler step if one-cycle\n if isinstance(lr_scheduler, OneCycleLR):\n lr_scheduler.step()\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n\n seen_batches += 1\n train_loss += loss.item()\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(batch, embeddings_storage_mode)\n\n batch_time += time.time() - start_time\n if seen_batches % modulo == 0:\n momentum_info = f' - momentum: {momentum:.4f}' if cycle_momentum else ''\n log.info(\n f\"epoch {self.epoch} - iter {seen_batches}/{total_number_of_batches} - loss \"\n f\"{train_loss / seen_batches:.8f} - samples/sec: {mini_batch_size * modulo / batch_time:.2f}\"\n f\" - lr: {learning_rate:.6f}{momentum_info}\"\n )\n batch_time = 0\n iteration = self.epoch * total_number_of_batches + batch_no\n if not param_selection_mode and write_weights:\n weight_extractor.extract_weights(\n self.model.state_dict(), iteration\n )\n\n train_loss /= seen_batches\n\n self.model.eval()\n\n log_line(log)\n log.info(\n f\"EPOCH {self.epoch} done: loss {train_loss:.4f} - lr {learning_rate:.7f}\"\n )\n\n if self.use_tensorboard:\n writer.add_scalar(\"train_loss\", train_loss, self.epoch)\n\n # anneal against train loss if training with dev, otherwise anneal against dev score\n current_score = train_loss\n\n # evaluate on train / dev / test split depending on training settings\n result_line: str = \"\"\n\n if log_train:\n train_eval_result, train_loss = self.model.evaluate(\n self.corpus.train,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{train_eval_result.log_line}\"\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.train, embeddings_storage_mode)\n\n if log_train_part:\n train_part_eval_result, train_part_loss = self.model.evaluate(\n train_part,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += (\n f\"\\t{train_part_loss}\\t{train_part_eval_result.log_line}\"\n )\n log.info(\n f\"TRAIN_SPLIT : loss {train_part_loss} - score {round(train_part_eval_result.main_score, 4)}\"\n )\n\n if log_dev:\n dev_eval_result, dev_loss = self.model.evaluate(\n self.corpus.dev,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n out_path=base_path / \"dev.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{dev_loss}\\t{dev_eval_result.log_line}\"\n log.info(\n f\"DEV : loss {dev_loss} - score {round(dev_eval_result.main_score, 4)}\"\n )\n # calculate scores using dev data if available\n # append dev score to score history\n dev_score_history.append(dev_eval_result.main_score)\n dev_loss_history.append(dev_loss.item())\n\n current_score = dev_eval_result.main_score\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.dev, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"dev_loss\", dev_loss, self.epoch)\n writer.add_scalar(\n \"dev_score\", dev_eval_result.main_score, self.epoch\n )\n\n if log_test:\n test_eval_result, test_loss = self.model.evaluate(\n self.corpus.test,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += 
f\"\\t{test_loss}\\t{test_eval_result.log_line}\"\n log.info(\n f\"TEST : loss {test_loss} - score {round(test_eval_result.main_score, 4)}\"\n )\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.test, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"test_loss\", test_loss, self.epoch)\n writer.add_scalar(\n \"test_score\", test_eval_result.main_score, self.epoch\n )\n\n # determine learning rate annealing through scheduler. Use auxiliary metric for AnnealOnPlateau\n if log_dev and isinstance(lr_scheduler, AnnealOnPlateau):\n lr_scheduler.step(current_score, dev_loss)\n elif not isinstance(lr_scheduler, OneCycleLR):\n lr_scheduler.step(current_score)\n\n train_loss_history.append(train_loss)\n\n # determine bad epoch number\n try:\n bad_epochs = lr_scheduler.num_bad_epochs\n except:\n bad_epochs = 0\n for group in optimizer.param_groups:\n new_learning_rate = group[\"lr\"]\n if new_learning_rate != previous_learning_rate:\n bad_epochs = patience + 1\n if previous_learning_rate == initial_learning_rate: bad_epochs += initial_extra_patience\n\n # log bad epochs\n log.info(f\"BAD EPOCHS (no improvement): {bad_epochs}\")\n\n # output log file\n with open(loss_txt, \"a\") as f:\n\n # make headers on first epoch\n if self.epoch == 1:\n f.write(\n f\"EPOCH\\tTIMESTAMP\\tBAD_EPOCHS\\tLEARNING_RATE\\tTRAIN_LOSS\"\n )\n\n if log_train:\n f.write(\n \"\\tTRAIN_\"\n + \"\\tTRAIN_\".join(\n train_eval_result.log_header.split(\"\\t\")\n )\n )\n if log_train_part:\n f.write(\n \"\\tTRAIN_PART_LOSS\\tTRAIN_PART_\"\n + \"\\tTRAIN_PART_\".join(\n train_part_eval_result.log_header.split(\"\\t\")\n )\n )\n if log_dev:\n f.write(\n \"\\tDEV_LOSS\\tDEV_\"\n + \"\\tDEV_\".join(dev_eval_result.log_header.split(\"\\t\"))\n )\n if log_test:\n f.write(\n \"\\tTEST_LOSS\\tTEST_\"\n + \"\\tTEST_\".join(\n test_eval_result.log_header.split(\"\\t\")\n )\n )\n\n f.write(\n f\"\\n{self.epoch}\\t{datetime.datetime.now():%H:%M:%S}\\t{bad_epochs}\\t{learning_rate:.4f}\\t{train_loss}\"\n )\n f.write(result_line)\n\n # if checkpoint is enabled, save model at each epoch\n if checkpoint and not param_selection_mode:\n self.save_checkpoint(base_path / \"checkpoint.pt\")\n\n # if we use dev data, remember best model based on dev evaluation score\n if (\n (not train_with_dev or anneal_with_restarts or anneal_with_prestarts)\n and not param_selection_mode\n and not isinstance(lr_scheduler, OneCycleLR)\n and current_score == lr_scheduler.best\n and bad_epochs == 0\n ):\n print(\"saving best model\")\n self.model.save(base_path / \"best-model.pt\")\n\n if anneal_with_prestarts:\n current_state_dict = self.model.state_dict()\n self.model.load_state_dict(last_epoch_model_state_dict)\n self.model.save(base_path / \"pre-best-model.pt\")\n self.model.load_state_dict(current_state_dict)\n\n if save_model_each_k_epochs > 0 and not self.epoch % save_model_each_k_epochs:\n print(\"saving model of current epoch\")\n model_name = \"model_epoch_\" + str(self.epoch) + \".pt\"\n self.model.save(base_path / model_name)\n\n # if we do not use dev data for model selection, save final model\n if save_final_model and not param_selection_mode:\n self.model.save(base_path / \"final-model.pt\")\n\n except KeyboardInterrupt:\n log_line(log)\n log.info(\"Exiting from training early.\")\n\n if self.use_tensorboard:\n writer.close()\n\n if not param_selection_mode:\n log.info(\"Saving model ...\")\n self.model.save(base_path / \"final-model.pt\")\n log.info(\"Done.\")\n\n # test 
best model if test data is present\n if self.corpus.test and not train_with_test:\n final_score = self.final_test(base_path, mini_batch_chunk_size, num_workers)\n else:\n final_score = 0\n log.info(\"Test data not provided setting final score to 0\")\n\n log.removeHandler(log_handler)\n\n if self.use_tensorboard:\n writer.close()\n\n return {\n \"test_score\": final_score,\n \"dev_score_history\": dev_score_history,\n \"train_loss_history\": train_loss_history,\n \"dev_loss_history\": dev_loss_history,\n }\n\n def save_checkpoint(self, model_file: Union[str, Path]):\n corpus = self.corpus\n self.corpus = None\n torch.save(self, str(model_file), pickle_protocol=4)\n self.corpus = corpus\n\n @classmethod\n def load_checkpoint(cls, checkpoint: Union[Path, str], corpus: Corpus):\n model: ModelTrainer = torch.load(checkpoint, map_location=flair.device)\n model.corpus = corpus\n return model\n\n def final_test(\n self, base_path: Union[Path, str], eval_mini_batch_size: int, num_workers: int = 8\n ):\n if type(base_path) is str:\n base_path = Path(base_path)\n\n log_line(log)\n log.info(\"Testing using best model ...\")\n\n self.model.eval()\n\n if (base_path / \"best-model.pt\").exists():\n self.model = self.model.load(base_path / \"best-model.pt\")\n\n test_results, test_loss = self.model.evaluate(\n self.corpus.test,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=\"none\",\n )\n\n test_results: Result = test_results\n log.info(test_results.log_line)\n log.info(test_results.detailed_results)\n log_line(log)\n\n # if we are training over multiple datasets, do evaluation for each\n if type(self.corpus) is MultiCorpus:\n for subcorpus in self.corpus.corpora:\n log_line(log)\n if subcorpus.test:\n subcorpus_results, subcorpus_loss = self.model.evaluate(\n subcorpus.test,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / f\"{subcorpus.name}-test.tsv\",\n embedding_storage_mode=\"none\",\n )\n log.info(subcorpus.name)\n log.info(subcorpus_results.log_line)\n\n # get and return the final test score of best model\n final_score = test_results.main_score\n\n return final_score\n\n def find_learning_rate(\n self,\n base_path: Union[Path, str],\n file_name: str = \"learning_rate.tsv\",\n start_learning_rate: float = 1e-7,\n end_learning_rate: float = 10,\n iterations: int = 100,\n mini_batch_size: int = 32,\n stop_early: bool = True,\n smoothing_factor: float = 0.98,\n **kwargs,\n ) -> Path:\n best_loss = None\n moving_avg_loss = 0\n\n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n learning_rate_tsv = init_output_file(base_path, file_name)\n\n with open(learning_rate_tsv, \"a\") as f:\n f.write(\"ITERATION\\tTIMESTAMP\\tLEARNING_RATE\\tTRAIN_LOSS\\n\")\n\n optimizer = self.optimizer(\n self.model.parameters(), lr=start_learning_rate, **kwargs\n )\n\n train_data = self.corpus.train\n\n scheduler = ExpAnnealLR(optimizer, end_learning_rate, iterations)\n\n model_state = self.model.state_dict()\n self.model.train()\n\n step = 0\n while step < iterations:\n batch_loader = DataLoader(\n train_data, batch_size=mini_batch_size, shuffle=True\n )\n for batch in batch_loader:\n step += 1\n\n # forward pass\n loss = self.model.forward_loss(batch)\n\n # update optimizer and scheduler\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n scheduler.step(step)\n\n print(scheduler.get_lr())\n 
learning_rate = scheduler.get_lr()[0]\n\n loss_item = loss.item()\n if step == 1:\n best_loss = loss_item\n else:\n if smoothing_factor > 0:\n moving_avg_loss = (\n smoothing_factor * moving_avg_loss\n + (1 - smoothing_factor) * loss_item\n )\n loss_item = moving_avg_loss / (\n 1 - smoothing_factor ** (step + 1)\n )\n if loss_item < best_loss:\n best_loss = loss\n\n if step > iterations:\n break\n\n if stop_early and (loss_item > 4 * best_loss or torch.isnan(loss)):\n log_line(log)\n log.info(\"loss diverged - stopping early!\")\n step = iterations\n break\n\n with open(str(learning_rate_tsv), \"a\") as f:\n f.write(\n f\"{step}\\t{datetime.datetime.now():%H:%M:%S}\\t{learning_rate}\\t{loss_item}\\n\"\n )\n\n self.model.load_state_dict(model_state)\n self.model.to(flair.device)\n\n log_line(log)\n log.info(f\"learning rate finder finished - plot {learning_rate_tsv}\")\n log_line(log)\n\n return Path(learning_rate_tsv)\n", "path": "flair/trainers/trainer.py" } ]
[ { "content": "import copy\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union\nimport time\nimport datetime\nimport sys\nimport inspect\n\nimport torch\nfrom torch.optim.sgd import SGD\nfrom torch.utils.data.dataset import ConcatDataset\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\nimport flair\nimport flair.nn\nfrom flair.data import MultiCorpus, Corpus\nfrom flair.datasets import DataLoader\nfrom flair.optim import ExpAnnealLR\nfrom flair.training_utils import (\n init_output_file,\n WeightExtractor,\n log_line,\n add_file_handler,\n Result,\n store_embeddings,\n AnnealOnPlateau,\n)\nfrom torch.optim.lr_scheduler import OneCycleLR\nfrom flair.models import SequenceTagger\nimport random\n\nlog = logging.getLogger(\"flair\")\n\n\nclass ModelTrainer:\n def __init__(\n self,\n model: flair.nn.Model,\n corpus: Corpus,\n optimizer: torch.optim.Optimizer = SGD,\n epoch: int = 0,\n use_tensorboard: bool = False,\n ):\n \"\"\"\n Initialize a model trainer\n :param model: The model that you want to train. The model should inherit from flair.nn.Model\n :param corpus: The dataset used to train the model, should be of type Corpus\n :param optimizer: The optimizer to use (typically SGD or Adam)\n :param epoch: The starting epoch (normally 0 but could be higher if you continue training model)\n :param use_tensorboard: If True, writes out tensorboard information\n \"\"\"\n self.model: flair.nn.Model = model\n self.corpus: Corpus = corpus\n self.optimizer: torch.optim.Optimizer = optimizer\n self.epoch: int = epoch\n self.use_tensorboard: bool = use_tensorboard\n\n def train(\n self,\n base_path: Union[Path, str],\n learning_rate: float = 0.1,\n mini_batch_size: int = 32,\n mini_batch_chunk_size: int = None,\n max_epochs: int = 100,\n scheduler=AnnealOnPlateau,\n cycle_momentum: bool = False,\n anneal_factor: float = 0.5,\n patience: int = 3,\n initial_extra_patience=0,\n min_learning_rate: float = 0.0001,\n train_with_dev: bool = False,\n train_with_test: bool = False,\n monitor_train: bool = False,\n monitor_test: bool = False,\n embeddings_storage_mode: str = \"cpu\",\n checkpoint: bool = False,\n save_final_model: bool = True,\n anneal_with_restarts: bool = False,\n anneal_with_prestarts: bool = False,\n batch_growth_annealing: bool = False,\n shuffle: bool = True,\n param_selection_mode: bool = False,\n write_weights: bool = False,\n num_workers: int = 6,\n sampler=None,\n use_amp: bool = False,\n amp_opt_level: str = \"O1\",\n eval_on_train_fraction=0.0,\n eval_on_train_shuffle=False,\n save_model_each_k_epochs: int = 0,\n **kwargs,\n ) -> dict:\n \"\"\"\n Trains any class that implements the flair.nn.Model interface.\n :param base_path: Main path to which all output during training is logged and models are saved\n :param learning_rate: Initial learning rate (or max, if scheduler is OneCycleLR)\n :param mini_batch_size: Size of mini-batches during training\n :param mini_batch_chunk_size: If mini-batches are larger than this number, they get broken down into chunks of this size for processing purposes\n :param max_epochs: Maximum number of epochs to train. 
Terminates training if this number is surpassed.\n :param scheduler: The learning rate scheduler to use\n :param cycle_momentum: If scheduler is OneCycleLR, whether the scheduler should cycle also the momentum\n :param anneal_factor: The factor by which the learning rate is annealed\n :param patience: Patience is the number of epochs with no improvement the Trainer waits\n until annealing the learning rate\n :param min_learning_rate: If the learning rate falls below this threshold, training terminates\n :param train_with_dev: If True, training is performed using both train+dev data\n :param monitor_train: If True, training data is evaluated at end of each epoch\n :param monitor_test: If True, test data is evaluated at end of each epoch\n :param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),\n 'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)\n :param checkpoint: If True, a full checkpoint is saved at end of each epoch\n :param save_final_model: If True, final model is saved\n :param anneal_with_restarts: If True, the last best model is restored when annealing the learning rate\n :param shuffle: If True, data is shuffled during training\n :param param_selection_mode: If True, testing is performed against dev data. Use this mode when doing\n parameter selection.\n :param num_workers: Number of workers in your data loader.\n :param sampler: You can pass a data sampler here for special sampling of data.\n :param eval_on_train_fraction: the fraction of train data to do the evaluation on,\n if 0. the evaluation is not performed on fraction of training data,\n if 'dev' the size is determined from dev set size\n :param eval_on_train_shuffle: if True the train data fraction is determined on the start of training\n and kept fixed during training, otherwise it's sampled at beginning of each epoch\n :param save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will\n be saved each 5 epochs. Default is 0 which means no model saving.\n :param save_model_epoch_step: Each save_model_epoch_step'th epoch the thus far trained model will be saved\n :param kwargs: Other arguments for the Optimizer\n :return:\n \"\"\"\n\n if self.use_tensorboard:\n try:\n from torch.utils.tensorboard import SummaryWriter\n\n writer = SummaryWriter()\n except:\n log_line(log)\n log.warning(\n \"ATTENTION! PyTorch >= 1.1.0 and pillow are required for TensorBoard support!\"\n )\n log_line(log)\n self.use_tensorboard = False\n pass\n\n if use_amp:\n if sys.version_info < (3, 0):\n raise RuntimeError(\"Apex currently only supports Python 3. Aborting.\")\n if amp is None:\n raise RuntimeError(\n \"Failed to import apex. 
Please install apex from https://www.github.com/nvidia/apex \"\n \"to enable mixed-precision training.\"\n )\n\n if mini_batch_chunk_size is None:\n mini_batch_chunk_size = mini_batch_size\n if learning_rate < min_learning_rate:\n min_learning_rate = learning_rate / 10\n\n initial_learning_rate = learning_rate\n\n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n\n log_handler = add_file_handler(log, base_path / \"training.log\")\n\n log_line(log)\n log.info(f'Model: \"{self.model}\"')\n log_line(log)\n log.info(f'Corpus: \"{self.corpus}\"')\n log_line(log)\n log.info(\"Parameters:\")\n log.info(f' - learning_rate: \"{learning_rate}\"')\n log.info(f' - mini_batch_size: \"{mini_batch_size}\"')\n log.info(f' - patience: \"{patience}\"')\n log.info(f' - anneal_factor: \"{anneal_factor}\"')\n log.info(f' - max_epochs: \"{max_epochs}\"')\n log.info(f' - shuffle: \"{shuffle}\"')\n log.info(f' - train_with_dev: \"{train_with_dev}\"')\n log.info(f' - batch_growth_annealing: \"{batch_growth_annealing}\"')\n log_line(log)\n log.info(f'Model training base path: \"{base_path}\"')\n log_line(log)\n log.info(f\"Device: {flair.device}\")\n log_line(log)\n log.info(f\"Embeddings storage mode: {embeddings_storage_mode}\")\n if isinstance(self.model, SequenceTagger) and self.model.weight_dict and self.model.use_crf:\n log_line(log)\n log.warning(f'WARNING: Specified class weights will not take effect when using CRF')\n\n # determine what splits (train, dev, test) to evaluate and log\n log_train = True if monitor_train else False\n log_test = (\n True\n if (not param_selection_mode and self.corpus.test and monitor_test)\n else False\n )\n log_dev = False if train_with_dev or not self.corpus.dev else True\n log_train_part = (\n True\n if (eval_on_train_fraction == \"dev\" or eval_on_train_fraction > 0.0)\n else False\n )\n\n if log_train_part:\n train_part_size = (\n len(self.corpus.dev)\n if eval_on_train_fraction == \"dev\"\n else int(len(self.corpus.train) * eval_on_train_fraction)\n )\n assert train_part_size > 0\n if not eval_on_train_shuffle:\n train_part_indices = list(range(train_part_size))\n train_part = torch.utils.data.dataset.Subset(\n self.corpus.train, train_part_indices\n )\n\n # prepare loss logging file and set up header\n loss_txt = init_output_file(base_path, \"loss.tsv\")\n\n weight_extractor = WeightExtractor(base_path)\n\n optimizer: torch.optim.Optimizer = self.optimizer(\n self.model.parameters(), lr=learning_rate, **kwargs\n )\n\n if use_amp:\n self.model, optimizer = amp.initialize(\n self.model, optimizer, opt_level=amp_opt_level\n )\n\n # minimize training loss if training with dev data, else maximize dev score\n anneal_mode = \"min\" if train_with_dev else \"max\"\n\n if scheduler == OneCycleLR:\n dataset_size = len(self.corpus.train)\n if train_with_dev:\n dataset_size += len(self.corpus.dev)\n lr_scheduler = OneCycleLR(optimizer,\n max_lr=learning_rate,\n steps_per_epoch=dataset_size // mini_batch_size + 1,\n epochs=max_epochs - self.epoch,\n # if we load a checkpoint, we have already trained for self.epoch\n pct_start=0.0,\n cycle_momentum=cycle_momentum)\n else:\n lr_scheduler = scheduler(\n optimizer,\n factor=anneal_factor,\n patience=patience,\n initial_extra_patience=initial_extra_patience,\n mode=anneal_mode,\n verbose=True,\n )\n\n if (isinstance(lr_scheduler, OneCycleLR) and batch_growth_annealing):\n raise ValueError(\"Batch growth with OneCycle policy is not implemented.\")\n\n train_data = self.corpus.train\n\n # if training also 
uses dev/train data, include in training set\n if train_with_dev or train_with_test:\n\n parts = [self.corpus.train]\n if train_with_dev: parts.append(self.corpus.dev)\n if train_with_test: parts.append(self.corpus.test)\n\n train_data = ConcatDataset(parts)\n\n # initialize sampler if provided\n if sampler is not None:\n # init with default values if only class is provided\n if inspect.isclass(sampler):\n sampler = sampler()\n # set dataset to sample from\n sampler.set_dataset(train_data)\n shuffle = False\n\n dev_score_history = []\n dev_loss_history = []\n train_loss_history = []\n\n micro_batch_size = mini_batch_chunk_size\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n previous_learning_rate = learning_rate\n momentum = 0\n for group in optimizer.param_groups:\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n\n for self.epoch in range(self.epoch + 1, max_epochs + 1):\n log_line(log)\n\n if anneal_with_prestarts:\n last_epoch_model_state_dict = copy.deepcopy(self.model.state_dict())\n\n if eval_on_train_shuffle:\n train_part_indices = list(range(self.corpus.train))\n random.shuffle(train_part_indices)\n train_part_indices = train_part_indices[:train_part_size]\n train_part = torch.utils.data.dataset.Subset(\n self.corpus.train, train_part_indices\n )\n\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n\n if learning_rate != previous_learning_rate and batch_growth_annealing:\n mini_batch_size *= 2\n\n # reload last best model if annealing with restarts is enabled\n if (\n (anneal_with_restarts or anneal_with_prestarts)\n and learning_rate != previous_learning_rate\n and (base_path / \"best-model.pt\").exists()\n ):\n if anneal_with_restarts:\n log.info(\"resetting to best model\")\n self.model.load_state_dict(\n self.model.load(base_path / \"best-model.pt\").state_dict()\n )\n if anneal_with_prestarts:\n log.info(\"resetting to pre-best model\")\n self.model.load_state_dict(\n self.model.load(base_path / \"pre-best-model.pt\").state_dict()\n )\n\n previous_learning_rate = learning_rate\n\n # stop training if learning rate becomes too small\n if (not isinstance(lr_scheduler, OneCycleLR)) and learning_rate < min_learning_rate:\n log_line(log)\n log.info(\"learning rate too small - quitting training!\")\n log_line(log)\n break\n\n batch_loader = DataLoader(\n train_data,\n batch_size=mini_batch_size,\n shuffle=shuffle if self.epoch > 1 else False, # never shuffle the first epoch\n num_workers=num_workers,\n sampler=sampler,\n )\n\n self.model.train()\n\n train_loss: float = 0\n\n seen_batches = 0\n total_number_of_batches = len(batch_loader)\n\n modulo = max(1, int(total_number_of_batches / 10))\n\n # process mini-batches\n batch_time = 0\n for batch_no, batch in enumerate(batch_loader):\n\n start_time = time.time()\n\n # zero the gradients on the model and optimizer\n self.model.zero_grad()\n optimizer.zero_grad()\n\n # if necessary, make batch_steps\n batch_steps = [batch]\n if len(batch) > micro_batch_size:\n batch_steps = [\n batch[x: x + micro_batch_size]\n for x in range(0, len(batch), micro_batch_size)\n ]\n\n # forward and backward for batch\n for batch_step in batch_steps:\n\n # forward pass\n loss = self.model.forward_loss(batch_step)\n\n # Backward\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # do the optimizer step\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n\n # do the 
scheduler step if one-cycle\n if isinstance(lr_scheduler, OneCycleLR):\n lr_scheduler.step()\n # get new learning rate\n for group in optimizer.param_groups:\n learning_rate = group[\"lr\"]\n if \"momentum\" in group:\n momentum = group[\"momentum\"]\n\n seen_batches += 1\n train_loss += loss.item()\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(batch, embeddings_storage_mode)\n\n batch_time += time.time() - start_time\n if seen_batches % modulo == 0:\n momentum_info = f' - momentum: {momentum:.4f}' if cycle_momentum else ''\n log.info(\n f\"epoch {self.epoch} - iter {seen_batches}/{total_number_of_batches} - loss \"\n f\"{train_loss / seen_batches:.8f} - samples/sec: {mini_batch_size * modulo / batch_time:.2f}\"\n f\" - lr: {learning_rate:.6f}{momentum_info}\"\n )\n batch_time = 0\n iteration = self.epoch * total_number_of_batches + batch_no\n if not param_selection_mode and write_weights:\n weight_extractor.extract_weights(\n self.model.state_dict(), iteration\n )\n\n train_loss /= seen_batches\n\n self.model.eval()\n\n log_line(log)\n log.info(\n f\"EPOCH {self.epoch} done: loss {train_loss:.4f} - lr {learning_rate:.7f}\"\n )\n\n if self.use_tensorboard:\n writer.add_scalar(\"train_loss\", train_loss, self.epoch)\n\n # anneal against train loss if training with dev, otherwise anneal against dev score\n current_score = train_loss\n\n # evaluate on train / dev / test split depending on training settings\n result_line: str = \"\"\n\n if log_train:\n train_eval_result, train_loss = self.model.evaluate(\n self.corpus.train,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{train_eval_result.log_line}\"\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.train, embeddings_storage_mode)\n\n if log_train_part:\n train_part_eval_result, train_part_loss = self.model.evaluate(\n train_part,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += (\n f\"\\t{train_part_loss}\\t{train_part_eval_result.log_line}\"\n )\n log.info(\n f\"TRAIN_SPLIT : loss {train_part_loss} - score {round(train_part_eval_result.main_score, 4)}\"\n )\n\n if log_dev:\n dev_eval_result, dev_loss = self.model.evaluate(\n self.corpus.dev,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n out_path=base_path / \"dev.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += f\"\\t{dev_loss}\\t{dev_eval_result.log_line}\"\n log.info(\n f\"DEV : loss {dev_loss} - score {round(dev_eval_result.main_score, 4)}\"\n )\n # calculate scores using dev data if available\n # append dev score to score history\n dev_score_history.append(dev_eval_result.main_score)\n dev_loss_history.append(dev_loss.item())\n\n current_score = dev_eval_result.main_score\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.dev, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"dev_loss\", dev_loss, self.epoch)\n writer.add_scalar(\n \"dev_score\", dev_eval_result.main_score, self.epoch\n )\n\n if log_test:\n test_eval_result, test_loss = self.model.evaluate(\n self.corpus.test,\n mini_batch_size=mini_batch_chunk_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=embeddings_storage_mode,\n )\n result_line += 
f\"\\t{test_loss}\\t{test_eval_result.log_line}\"\n log.info(\n f\"TEST : loss {test_loss} - score {round(test_eval_result.main_score, 4)}\"\n )\n\n # depending on memory mode, embeddings are moved to CPU, GPU or deleted\n store_embeddings(self.corpus.test, embeddings_storage_mode)\n\n if self.use_tensorboard:\n writer.add_scalar(\"test_loss\", test_loss, self.epoch)\n writer.add_scalar(\n \"test_score\", test_eval_result.main_score, self.epoch\n )\n\n # determine learning rate annealing through scheduler. Use auxiliary metric for AnnealOnPlateau\n if log_dev and isinstance(lr_scheduler, AnnealOnPlateau):\n lr_scheduler.step(current_score, dev_loss)\n elif not isinstance(lr_scheduler, OneCycleLR):\n lr_scheduler.step(current_score)\n\n train_loss_history.append(train_loss)\n\n # determine bad epoch number\n try:\n bad_epochs = lr_scheduler.num_bad_epochs\n except:\n bad_epochs = 0\n for group in optimizer.param_groups:\n new_learning_rate = group[\"lr\"]\n if new_learning_rate != previous_learning_rate:\n bad_epochs = patience + 1\n if previous_learning_rate == initial_learning_rate: bad_epochs += initial_extra_patience\n\n # log bad epochs\n log.info(f\"BAD EPOCHS (no improvement): {bad_epochs}\")\n\n # output log file\n with open(loss_txt, \"a\") as f:\n\n # make headers on first epoch\n if self.epoch == 1:\n f.write(\n f\"EPOCH\\tTIMESTAMP\\tBAD_EPOCHS\\tLEARNING_RATE\\tTRAIN_LOSS\"\n )\n\n if log_train:\n f.write(\n \"\\tTRAIN_\"\n + \"\\tTRAIN_\".join(\n train_eval_result.log_header.split(\"\\t\")\n )\n )\n if log_train_part:\n f.write(\n \"\\tTRAIN_PART_LOSS\\tTRAIN_PART_\"\n + \"\\tTRAIN_PART_\".join(\n train_part_eval_result.log_header.split(\"\\t\")\n )\n )\n if log_dev:\n f.write(\n \"\\tDEV_LOSS\\tDEV_\"\n + \"\\tDEV_\".join(dev_eval_result.log_header.split(\"\\t\"))\n )\n if log_test:\n f.write(\n \"\\tTEST_LOSS\\tTEST_\"\n + \"\\tTEST_\".join(\n test_eval_result.log_header.split(\"\\t\")\n )\n )\n\n f.write(\n f\"\\n{self.epoch}\\t{datetime.datetime.now():%H:%M:%S}\\t{bad_epochs}\\t{learning_rate:.4f}\\t{train_loss}\"\n )\n f.write(result_line)\n\n # if checkpoint is enabled, save model at each epoch\n if checkpoint and not param_selection_mode:\n self.save_checkpoint(base_path / \"checkpoint.pt\")\n\n # if we use dev data, remember best model based on dev evaluation score\n if (\n (not train_with_dev or anneal_with_restarts or anneal_with_prestarts)\n and not param_selection_mode\n and not isinstance(lr_scheduler, OneCycleLR)\n and current_score == lr_scheduler.best\n and bad_epochs == 0\n ):\n print(\"saving best model\")\n self.model.save(base_path / \"best-model.pt\")\n\n if anneal_with_prestarts:\n current_state_dict = self.model.state_dict()\n self.model.load_state_dict(last_epoch_model_state_dict)\n self.model.save(base_path / \"pre-best-model.pt\")\n self.model.load_state_dict(current_state_dict)\n\n if save_model_each_k_epochs > 0 and not self.epoch % save_model_each_k_epochs:\n print(\"saving model of current epoch\")\n model_name = \"model_epoch_\" + str(self.epoch) + \".pt\"\n self.model.save(base_path / model_name)\n\n # if we do not use dev data for model selection, save final model\n if save_final_model and not param_selection_mode:\n self.model.save(base_path / \"final-model.pt\")\n\n except KeyboardInterrupt:\n log_line(log)\n log.info(\"Exiting from training early.\")\n\n if self.use_tensorboard:\n writer.close()\n\n if not param_selection_mode:\n log.info(\"Saving model ...\")\n self.model.save(base_path / \"final-model.pt\")\n log.info(\"Done.\")\n\n # test 
best model if test data is present\n if self.corpus.test and not train_with_test:\n final_score = self.final_test(base_path, mini_batch_chunk_size, num_workers)\n else:\n final_score = 0\n log.info(\"Test data not provided setting final score to 0\")\n\n log_handler.close()\n\n log.removeHandler(log_handler)\n\n if self.use_tensorboard:\n writer.close()\n\n return {\n \"test_score\": final_score,\n \"dev_score_history\": dev_score_history,\n \"train_loss_history\": train_loss_history,\n \"dev_loss_history\": dev_loss_history,\n }\n\n def save_checkpoint(self, model_file: Union[str, Path]):\n corpus = self.corpus\n self.corpus = None\n torch.save(self, str(model_file), pickle_protocol=4)\n self.corpus = corpus\n\n @classmethod\n def load_checkpoint(cls, checkpoint: Union[Path, str], corpus: Corpus):\n model: ModelTrainer = torch.load(checkpoint, map_location=flair.device)\n model.corpus = corpus\n return model\n\n def final_test(\n self, base_path: Union[Path, str], eval_mini_batch_size: int, num_workers: int = 8\n ):\n if type(base_path) is str:\n base_path = Path(base_path)\n\n log_line(log)\n log.info(\"Testing using best model ...\")\n\n self.model.eval()\n\n if (base_path / \"best-model.pt\").exists():\n self.model = self.model.load(base_path / \"best-model.pt\")\n\n test_results, test_loss = self.model.evaluate(\n self.corpus.test,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / \"test.tsv\",\n embedding_storage_mode=\"none\",\n )\n\n test_results: Result = test_results\n log.info(test_results.log_line)\n log.info(test_results.detailed_results)\n log_line(log)\n\n # if we are training over multiple datasets, do evaluation for each\n if type(self.corpus) is MultiCorpus:\n for subcorpus in self.corpus.corpora:\n log_line(log)\n if subcorpus.test:\n subcorpus_results, subcorpus_loss = self.model.evaluate(\n subcorpus.test,\n mini_batch_size=eval_mini_batch_size,\n num_workers=num_workers,\n out_path=base_path / f\"{subcorpus.name}-test.tsv\",\n embedding_storage_mode=\"none\",\n )\n log.info(subcorpus.name)\n log.info(subcorpus_results.log_line)\n\n # get and return the final test score of best model\n final_score = test_results.main_score\n\n return final_score\n\n def find_learning_rate(\n self,\n base_path: Union[Path, str],\n file_name: str = \"learning_rate.tsv\",\n start_learning_rate: float = 1e-7,\n end_learning_rate: float = 10,\n iterations: int = 100,\n mini_batch_size: int = 32,\n stop_early: bool = True,\n smoothing_factor: float = 0.98,\n **kwargs,\n ) -> Path:\n best_loss = None\n moving_avg_loss = 0\n\n # cast string to Path\n if type(base_path) is str:\n base_path = Path(base_path)\n learning_rate_tsv = init_output_file(base_path, file_name)\n\n with open(learning_rate_tsv, \"a\") as f:\n f.write(\"ITERATION\\tTIMESTAMP\\tLEARNING_RATE\\tTRAIN_LOSS\\n\")\n\n optimizer = self.optimizer(\n self.model.parameters(), lr=start_learning_rate, **kwargs\n )\n\n train_data = self.corpus.train\n\n scheduler = ExpAnnealLR(optimizer, end_learning_rate, iterations)\n\n model_state = self.model.state_dict()\n self.model.train()\n\n step = 0\n while step < iterations:\n batch_loader = DataLoader(\n train_data, batch_size=mini_batch_size, shuffle=True\n )\n for batch in batch_loader:\n step += 1\n\n # forward pass\n loss = self.model.forward_loss(batch)\n\n # update optimizer and scheduler\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)\n optimizer.step()\n scheduler.step(step)\n\n 
print(scheduler.get_lr())\n learning_rate = scheduler.get_lr()[0]\n\n loss_item = loss.item()\n if step == 1:\n best_loss = loss_item\n else:\n if smoothing_factor > 0:\n moving_avg_loss = (\n smoothing_factor * moving_avg_loss\n + (1 - smoothing_factor) * loss_item\n )\n loss_item = moving_avg_loss / (\n 1 - smoothing_factor ** (step + 1)\n )\n if loss_item < best_loss:\n best_loss = loss\n\n if step > iterations:\n break\n\n if stop_early and (loss_item > 4 * best_loss or torch.isnan(loss)):\n log_line(log)\n log.info(\"loss diverged - stopping early!\")\n step = iterations\n break\n\n with open(str(learning_rate_tsv), \"a\") as f:\n f.write(\n f\"{step}\\t{datetime.datetime.now():%H:%M:%S}\\t{learning_rate}\\t{loss_item}\\n\"\n )\n\n self.model.load_state_dict(model_state)\n self.model.to(flair.device)\n\n log_line(log)\n log.info(f\"learning rate finder finished - plot {learning_rate_tsv}\")\n log_line(log)\n\n return Path(learning_rate_tsv)\n", "path": "flair/trainers/trainer.py" } ]
diff --git a/flair/trainers/trainer.py b/flair/trainers/trainer.py
index 021ac53633..3b22478fbc 100644
--- a/flair/trainers/trainer.py
+++ b/flair/trainers/trainer.py
@@ -630,6 +630,8 @@ def train(
             final_score = 0
             log.info("Test data not provided setting final score to 0")
 
+        log_handler.close()
+
         log.removeHandler(log_handler)
 
         if self.use_tensorboard:
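For context on the hunk above: it closes the file handler for training.log before detaching it from the logger, so the log file is flushed and its file descriptor released. A minimal standard-library sketch of the same pattern follows; flair's add_file_handler helper is assumed here to return something equivalent to logging.FileHandler, which is my assumption and is not stated in the diff itself.

```python
# Sketch only: mirrors the two lines added in the diff above using just the
# standard library. flair's add_file_handler() is assumed to behave like
# logging.FileHandler for the purpose of this illustration.
import logging

log = logging.getLogger("flair")
log.setLevel(logging.INFO)
log_handler = logging.FileHandler("training.log")
log.addHandler(log_handler)

log.info("training messages are written to training.log")

log_handler.close()             # flush and release the file descriptor first
log.removeHandler(log_handler)  # then detach the handler from the logger
```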
ansible-collections__amazon.aws-1332
aws_rds inventory plugin does not return any results due to regression in 478022695b333043857a6929b350a2a3c07ae567

### Summary

aws_rds inventory plugin does not return any results due to regression in 478022695b333043857a6929b350a2a3c07ae567. This commit, ostensibly named "linting", has actually removed the following line in plugins/inventory/aws_rds.py:

```
config_data = self._read_config_data(path)
```

This causes the inventory plugin to return no data. Restoring this line fixes the expected output.

### Issue Type

Bug Report

### Component Name

aws_rds inventory plugin

### Ansible Version

```
ansible [core 2.14.1]
  config file = /home/ansible/ansible/ansible.cfg
  configured module search path = ['/home/ansible/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/local/lib/python3.9/dist-packages/ansible
  ansible collection location = /home/ansible/.ansible/collections:/usr/share/ansible/collections
  executable location = /usr/local/bin/ansible
  python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)
  jinja version = 3.1.2
  libyaml = True
```

### Collection Versions

```
# /usr/local/lib/python3.9/dist-packages/ansible_collections
Collection                    Version
----------------------------- -------
amazon.aws                    5.1.0
ansible.netcommon             4.1.0
ansible.posix                 1.4.0
ansible.utils                 2.8.0
ansible.windows               1.12.0
arista.eos                    6.0.0
awx.awx                       21.10.0
azure.azcollection            1.14.0
check_point.mgmt              4.0.0
chocolatey.chocolatey         1.3.1
cisco.aci                     2.3.0
cisco.asa                     4.0.0
cisco.dnac                    6.6.1
cisco.intersight              1.0.22
cisco.ios                     4.0.0
cisco.iosxr                   4.0.3
cisco.ise                     2.5.9
cisco.meraki                  2.13.0
cisco.mso                     2.1.0
cisco.nso                     1.0.3
cisco.nxos                    4.0.1
cisco.ucs                     1.8.0
cloud.common                  2.1.2
cloudscale_ch.cloud           2.2.3
community.aws                 5.0.0
community.azure               2.0.0
community.ciscosmb            1.0.5
community.crypto              2.9.0
community.digitalocean        1.22.0
community.dns                 2.4.2
community.docker              3.3.1
community.fortios             1.0.0
community.general             6.1.0
community.google              1.0.0
community.grafana             1.5.3
community.hashi_vault         4.0.0
community.hrobot              1.6.0
community.libvirt             1.2.0
community.mongodb             1.4.2
community.mysql               3.5.1
community.network             5.0.0
community.okd                 2.2.0
community.postgresql          2.3.1
community.proxysql            1.4.0
community.rabbitmq            1.2.3
community.routeros            2.5.0
community.sap                 1.0.0
community.sap_libs            1.4.0
community.skydive             1.0.0
community.sops                1.5.0
community.vmware              3.2.0
community.windows             1.11.1
community.zabbix              1.9.0
containers.podman             1.10.1
cyberark.conjur               1.2.0
cyberark.pas                  1.0.14
dellemc.enterprise_sonic      2.0.0
dellemc.openmanage            6.3.0
dellemc.os10                  1.1.1
dellemc.os6                   1.0.7
dellemc.os9                   1.0.4
f5networks.f5_modules         1.21.0
fortinet.fortimanager         2.1.7
fortinet.fortios              2.2.1
frr.frr                       2.0.0
gluster.gluster               1.0.2
google.cloud                  1.0.2
grafana.grafana               1.1.0
hetzner.hcloud                1.9.0
hpe.nimble                    1.1.4
ibm.qradar                    2.1.0
ibm.spectrum_virtualize       1.10.0
infinidat.infinibox           1.3.12
infoblox.nios_modules         1.4.1
inspur.ispim                  1.2.0
inspur.sm                     2.3.0
junipernetworks.junos         4.1.0
kubernetes.core               2.3.2
lowlydba.sqlserver            1.2.1
mellanox.onyx                 1.0.0
netapp.aws                    21.7.0
netapp.azure                  21.10.0
netapp.cloudmanager           21.21.0
netapp.elementsw              21.7.0
netapp.ontap                  22.0.1
netapp.storagegrid            21.11.1
netapp.um_info                21.8.0
netapp_eseries.santricity     1.3.1
netbox.netbox                 3.9.0
ngine_io.cloudstack           2.3.0
ngine_io.exoscale             1.0.0
ngine_io.vultr                1.1.2
openstack.cloud               1.10.0
openvswitch.openvswitch       2.1.0
ovirt.ovirt                   2.4.1
purestorage.flasharray        1.15.0
purestorage.flashblade        1.10.0
purestorage.fusion            1.2.0
sensu.sensu_go                1.13.1
splunk.es                     2.1.0
t_systems_mms.icinga_director 1.31.4
theforeman.foreman            3.7.0
vmware.vmware_rest            2.2.0
vultr.cloud                   1.3.1
vyos.vyos                     4.0.0
wti.remote                    1.0.4

# /home/ansible/.ansible/collections/ansible_collections
Collection        Version
----------------- -------
amazon.aws        5.1.0
ansible.posix     1.4.0
community.general 6.1.0
community.mysql   3.5.1
mafalb.squid      0.2.0
```

### AWS SDK versions

```
Name: boto
Version: 2.49.0
Summary: Amazon Web Services Library
Home-page: https://github.com/boto/boto/
Author: Mitch Garnaat
Author-email: [email protected]
License: MIT
Location: /usr/lib/python3/dist-packages
Requires:
Required-by: cloudwatchmon
---
Name: boto3
Version: 1.26.32
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.9/dist-packages
Requires: jmespath, botocore, s3transfer
Required-by:
---
Name: botocore
Version: 1.29.32
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.9/dist-packages
Requires: urllib3, python-dateutil, jmespath
Required-by: s3transfer, boto3
```

### Configuration

```
plugin: aws_rds
regions:
  - us-east-1
keyed_groups:
  - prefix: tag
    key: tags
compose:
  ansible_host: endpoint.address
```

### OS / Environment

_No response_

### Steps to Reproduce

```
ansible-inventory -vvvvvvvv -i inventory/aws_rds.yaml --list
```

### Expected Results

I expected some inventory results to appear

### Actual Results

```
ansible-inventory [core 2.14.1]
  config file = /home/ansible/ansible/ansible.cfg
  configured module search path = ['/home/ansible/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/local/lib/python3.9/dist-packages/ansible
  ansible collection location = /home/ansible/.ansible/collections:/usr/share/ansible/collections
  executable location = /usr/local/bin/ansible-inventory
  python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)
  jinja version = 3.1.2
  libyaml = True
Using /home/ansible/ansible/ansible.cfg as config file
Reading vault password file: /home/ansible/ansible/.vault_pass
setting up inventory plugins
redirecting (type: inventory) ansible.builtin.aws_ec2 to amazon.aws.aws_ec2
Loading collection amazon.aws from /home/ansible/.ansible/collections/ansible_collections/amazon/aws
redirecting (type: inventory) ansible.builtin.aws_rds to amazon.aws.aws_rds
ansible_collections.amazon.aws.plugins.inventory.aws_ec2 declined parsing /home/ansible/ansible/inventory/aws_rds.yaml as it did not pass its verify_file() method
Parsed /home/ansible/ansible/inventory/aws_rds.yaml inventory source with ansible_collections.amazon.aws.plugins.inventory.aws_rds plugin
{
    "_meta": {
        "hostvars": {}
    },
    "all": {
        "children": [
            "aws_rds",
            "ungrouped"
        ]
    }
}
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
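As a side note for readers, the failure mode reported above can be illustrated with a small self-contained toy (these are not Ansible's real classes; every name below is illustrative): when the config read is skipped, each option silently falls back to its default, so the configured regions are never seen and nothing is ever queried.

```python
# Toy illustration only -- these are not Ansible's real classes. It shows why
# dropping the _read_config_data() call yields an empty inventory: options
# silently fall back to their defaults, so regions == [] and nothing is queried.
class ToyInventoryPlugin:
    DEFAULTS = {"regions": [], "filters": {}, "statuses": ["creating", "available"]}

    def __init__(self):
        self._options = dict(self.DEFAULTS)

    def _read_config_data(self, path):
        # Stand-in for parsing aws_rds.yaml; the real plugin loads the YAML file.
        self._options.update({"regions": ["us-east-1"]})

    def get_option(self, name):
        return self._options[name]

    def parse(self, path, read_config=True):
        if read_config:
            self._read_config_data(path)   # the line the regression removed
        return self.get_option("regions")  # [] when the config was never read


plugin = ToyInventoryPlugin()
print(plugin.parse("aws_rds.yaml", read_config=False))  # [] -> empty inventory
print(plugin.parse("aws_rds.yaml", read_config=True))   # ['us-east-1']
```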
[ { "content": "# Copyright (c) 2018 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: aws_rds\nshort_description: RDS instance inventory source\ndescription:\n - Get instances and clusters from Amazon Web Services RDS.\n - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).\noptions:\n regions:\n description:\n - A list of regions in which to describe RDS instances and clusters. Available regions are listed here\n U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).\n default: []\n filters:\n description:\n - A dictionary of filter value pairs. Available filters are listed here\n U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by\n db-cluster-id and I(include_clusters) is True it will apply to clusters as well.\n default: {}\n strict_permissions:\n description:\n - By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to\n False in the inventory config file which will allow the restrictions to be gracefully skipped.\n type: bool\n default: True\n include_clusters:\n description: Whether or not to query for Aurora clusters as well as instances.\n type: bool\n default: False\n statuses:\n description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.\n type: list\n elements: str\n default:\n - creating\n - available\n iam_role_arn:\n description:\n - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide\n AWS credentials with enough privilege to perform the AssumeRole action.\n hostvars_prefix:\n description:\n - The prefix for host variables names coming from AWS.\n type: str\n version_added: 3.1.0\n hostvars_suffix:\n description:\n - The suffix for host variables names coming from AWS.\n type: str\n version_added: 3.1.0\nnotes:\n - Ansible versions prior to 2.10 should use the fully qualified plugin name 'amazon.aws.aws_rds'.\nextends_documentation_fragment:\n - inventory_cache\n - constructed\n - amazon.aws.boto3\n - amazon.aws.aws_credentials\nauthor:\n - Sloane Hertel (@s-hertel)\n'''\n\nEXAMPLES = '''\nplugin: aws_rds\nregions:\n - us-east-1\n - ca-central-1\nkeyed_groups:\n - key: 'db_parameter_groups|json_query(\"[].db_parameter_group_name\")'\n prefix: rds_parameter_group\n - key: engine\n prefix: rds\n - key: tags\n - key: region\nhostvars_prefix: aws_\nhostvars_suffix: _rds\n'''\n\ntry:\n import boto3\n import botocore\nexcept ImportError:\n pass # will be captured by imported HAS_BOTO3\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.basic import missing_required_lib\nfrom ansible.plugins.inventory import BaseInventoryPlugin\nfrom ansible.plugins.inventory import Cacheable\nfrom ansible.plugins.inventory import Constructable\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict\nfrom 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):\n\n NAME = 'amazon.aws.aws_rds'\n\n def __init__(self):\n super(InventoryModule, self).__init__()\n self.credentials = {}\n self.boto_profile = None\n self.iam_role_arn = None\n\n def _get_connection(self, credentials, region='us-east-1'):\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n if self.boto_profile:\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n else:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n return connection\n\n def _boto3_assume_role(self, credentials, region):\n \"\"\"\n Assume an IAM role passed by iam_role_arn parameter\n :return: a dict containing the credentials of the assumed role\n \"\"\"\n\n iam_role_arn = self.iam_role_arn\n\n try:\n sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)\n sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory')\n return dict(\n aws_access_key_id=sts_session['Credentials']['AccessKeyId'],\n aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],\n aws_session_token=sts_session['Credentials']['SessionToken']\n )\n except botocore.exceptions.ClientError as e:\n raise AnsibleError(\"Unable to assume IAM role: %s\" % to_native(e))\n\n def _boto3_conn(self, regions):\n '''\n :param regions: A list of regions to create a boto3 client\n\n Generator that yields a boto3 client and the region\n '''\n iam_role_arn = self.iam_role_arn\n credentials = self.credentials\n for region in regions:\n try:\n if iam_role_arn is not None:\n assumed_credentials = self._boto3_assume_role(credentials, region)\n else:\n assumed_credentials = credentials\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n if self.boto_profile:\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n else:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n yield connection, region\n\n def _get_hosts_by_region(self, connection, filters, strict):\n\n def _add_tags_for_hosts(connection, hosts, strict):\n for host in hosts:\n if 'DBInstanceArn' in host:\n resource_arn = host['DBInstanceArn']\n else:\n resource_arn = host['DBClusterArn']\n\n try:\n tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']\n except is_boto3_error_code('AccessDenied') as e:\n if not strict:\n tags = []\n else:\n raise e\n host['Tags'] = tags\n\n def wrapper(f, *args, **kwargs):\n try:\n results = f(*args, **kwargs)\n if 'DBInstances' in results:\n results = results['DBInstances']\n else:\n results = results['DBClusters']\n _add_tags_for_hosts(connection, results, strict)\n except 
is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except\n if not strict:\n results = []\n else:\n raise AnsibleError(\"Failed to query RDS: {0}\".format(to_native(e)))\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except\n raise AnsibleError(\"Failed to query RDS: {0}\".format(to_native(e)))\n return results\n return wrapper\n\n def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):\n '''\n :param regions: a list of regions in which to describe hosts\n :param instance_filters: a list of boto3 filter dictionaries\n :param cluster_filters: a list of boto3 filter dictionaries\n :param strict: a boolean determining whether to fail or ignore 403 error codes\n :param statuses: a list of statuses that the returned hosts should match\n :return A list of host dictionaries\n '''\n all_instances = []\n all_clusters = []\n for connection, _region in self._boto3_conn(regions):\n paginator = connection.get_paginator('describe_db_instances')\n all_instances.extend(\n self._get_hosts_by_region(connection, instance_filters, strict)\n (paginator.paginate(Filters=instance_filters).build_full_result)\n )\n if gather_clusters:\n all_clusters.extend(\n self._get_hosts_by_region(connection, cluster_filters, strict)\n (connection.describe_db_clusters, **{'Filters': cluster_filters})\n )\n sorted_hosts = list(\n sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +\n sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])\n )\n return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)\n\n def find_hosts_with_valid_statuses(self, hosts, statuses):\n if 'all' in statuses:\n return hosts\n valid_hosts = []\n for host in hosts:\n if host.get('DBInstanceStatus') in statuses:\n valid_hosts.append(host)\n elif host.get('Status') in statuses:\n valid_hosts.append(host)\n return valid_hosts\n\n def _populate(self, hosts):\n group = 'aws_rds'\n self.inventory.add_group(group)\n if hosts:\n self._add_hosts(hosts=hosts, group=group)\n self.inventory.add_child('all', group)\n\n def _populate_from_source(self, source_data):\n hostvars = source_data.pop('_meta', {}).get('hostvars', {})\n for group in source_data:\n if group == 'all':\n continue\n else:\n self.inventory.add_group(group)\n hosts = source_data[group].get('hosts', [])\n for host in hosts:\n self._populate_host_vars([host], hostvars.get(host, {}), group)\n self.inventory.add_child('all', group)\n\n def _get_hostname(self, host):\n if host.get('DBInstanceIdentifier'):\n return host['DBInstanceIdentifier']\n else:\n return host['DBClusterIdentifier']\n\n def _format_inventory(self, hosts):\n results = {'_meta': {'hostvars': {}}}\n group = 'aws_rds'\n results[group] = {'hosts': []}\n for host in hosts:\n hostname = self._get_hostname(host)\n results[group]['hosts'].append(hostname)\n h = self.inventory.get_host(hostname)\n results['_meta']['hostvars'][h.name] = h.vars\n return results\n\n def _add_hosts(self, hosts, group):\n '''\n :param hosts: a list of hosts to be added to a group\n :param group: the name of the group to which the hosts belong\n '''\n for host in hosts:\n hostname = self._get_hostname(host)\n host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])\n host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))\n\n # Allow easier grouping by region\n if 'availability_zone' in host:\n host['region'] = host['availability_zone'][:-1]\n elif 'availability_zones' in host:\n 
host['region'] = host['availability_zones'][0][:-1]\n\n self.inventory.add_host(hostname, group=group)\n hostvars_prefix = self.get_option(\"hostvars_prefix\")\n hostvars_suffix = self.get_option(\"hostvars_suffix\")\n new_vars = dict()\n for hostvar, hostval in host.items():\n if hostvars_prefix:\n hostvar = hostvars_prefix + hostvar\n if hostvars_suffix:\n hostvar = hostvar + hostvars_suffix\n new_vars[hostvar] = hostval\n self.inventory.set_variable(hostname, hostvar, hostval)\n host.update(new_vars)\n\n # Use constructed if applicable\n strict = self.get_option('strict')\n # Composed variables\n self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)\n # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group\n self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)\n # Create groups based on variable values and add the corresponding hosts to it\n self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)\n\n def _set_credentials(self):\n '''\n '''\n self.boto_profile = self.get_option('aws_profile')\n aws_access_key_id = self.get_option('aws_access_key')\n aws_secret_access_key = self.get_option('aws_secret_key')\n aws_security_token = self.get_option('aws_security_token')\n self.iam_role_arn = self.get_option('iam_role_arn')\n\n if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):\n session = botocore.session.get_session()\n if session.get_credentials() is not None:\n aws_access_key_id = session.get_credentials().access_key\n aws_secret_access_key = session.get_credentials().secret_key\n aws_security_token = session.get_credentials().token\n\n if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):\n raise AnsibleError(\"Insufficient boto credentials found. 
Please provide them in your \"\n \"inventory configuration file or set them as environment variables.\")\n\n if aws_access_key_id:\n self.credentials['aws_access_key_id'] = aws_access_key_id\n if aws_secret_access_key:\n self.credentials['aws_secret_access_key'] = aws_secret_access_key\n if aws_security_token:\n self.credentials['aws_session_token'] = aws_security_token\n\n def verify_file(self, path):\n '''\n :param loader: an ansible.parsing.dataloader.DataLoader object\n :param path: the path to the inventory config file\n :return the contents of the config file\n '''\n if super(InventoryModule, self).verify_file(path):\n if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):\n return True\n return False\n\n def parse(self, inventory, loader, path, cache=True):\n super(InventoryModule, self).parse(inventory, loader, path)\n\n if not HAS_BOTO3:\n raise AnsibleError(missing_required_lib('botocore and boto3'))\n\n self._set_credentials()\n\n # get user specifications\n regions = self.get_option('regions')\n filters = self.get_option('filters')\n strict_permissions = self.get_option('strict_permissions')\n statuses = self.get_option('statuses')\n include_clusters = self.get_option('include_clusters')\n instance_filters = ansible_dict_to_boto3_filter_list(filters)\n cluster_filters = []\n if 'db-cluster-id' in filters and include_clusters:\n cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})\n\n cache_key = self.get_cache_key(path)\n # false when refresh_cache or --flush-cache is used\n if cache:\n # get the user-specified directive\n cache = self.get_option('cache')\n\n # Generate inventory\n formatted_inventory = {}\n cache_needs_update = False\n if cache:\n try:\n results = self._cache[cache_key]\n except KeyError:\n # if cache expires or cache file doesn't exist\n cache_needs_update = True\n else:\n self._populate_from_source(results)\n\n if not cache or cache_needs_update:\n results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)\n self._populate(results)\n formatted_inventory = self._format_inventory(results)\n\n # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used\n # when the user is using caching, update the cached inventory\n if cache_needs_update or (not cache and self.get_option('cache')):\n self._cache[cache_key] = formatted_inventory\n", "path": "plugins/inventory/aws_rds.py" } ]
[ { "content": "# Copyright (c) 2018 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: aws_rds\nshort_description: RDS instance inventory source\ndescription:\n - Get instances and clusters from Amazon Web Services RDS.\n - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).\noptions:\n regions:\n description:\n - A list of regions in which to describe RDS instances and clusters. Available regions are listed here\n U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).\n default: []\n filters:\n description:\n - A dictionary of filter value pairs. Available filters are listed here\n U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by\n db-cluster-id and I(include_clusters) is True it will apply to clusters as well.\n default: {}\n strict_permissions:\n description:\n - By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to\n False in the inventory config file which will allow the restrictions to be gracefully skipped.\n type: bool\n default: True\n include_clusters:\n description: Whether or not to query for Aurora clusters as well as instances.\n type: bool\n default: False\n statuses:\n description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.\n type: list\n elements: str\n default:\n - creating\n - available\n iam_role_arn:\n description:\n - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide\n AWS credentials with enough privilege to perform the AssumeRole action.\n hostvars_prefix:\n description:\n - The prefix for host variables names coming from AWS.\n type: str\n version_added: 3.1.0\n hostvars_suffix:\n description:\n - The suffix for host variables names coming from AWS.\n type: str\n version_added: 3.1.0\nnotes:\n - Ansible versions prior to 2.10 should use the fully qualified plugin name 'amazon.aws.aws_rds'.\nextends_documentation_fragment:\n - inventory_cache\n - constructed\n - amazon.aws.boto3\n - amazon.aws.aws_credentials\nauthor:\n - Sloane Hertel (@s-hertel)\n'''\n\nEXAMPLES = '''\nplugin: aws_rds\nregions:\n - us-east-1\n - ca-central-1\nkeyed_groups:\n - key: 'db_parameter_groups|json_query(\"[].db_parameter_group_name\")'\n prefix: rds_parameter_group\n - key: engine\n prefix: rds\n - key: tags\n - key: region\nhostvars_prefix: aws_\nhostvars_suffix: _rds\n'''\n\ntry:\n import boto3\n import botocore\nexcept ImportError:\n pass # will be captured by imported HAS_BOTO3\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.basic import missing_required_lib\nfrom ansible.plugins.inventory import BaseInventoryPlugin\nfrom ansible.plugins.inventory import Cacheable\nfrom ansible.plugins.inventory import Constructable\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict\nfrom 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):\n\n NAME = 'amazon.aws.aws_rds'\n\n def __init__(self):\n super(InventoryModule, self).__init__()\n self.credentials = {}\n self.boto_profile = None\n self.iam_role_arn = None\n\n def _get_connection(self, credentials, region='us-east-1'):\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n if self.boto_profile:\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n else:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n return connection\n\n def _boto3_assume_role(self, credentials, region):\n \"\"\"\n Assume an IAM role passed by iam_role_arn parameter\n :return: a dict containing the credentials of the assumed role\n \"\"\"\n\n iam_role_arn = self.iam_role_arn\n\n try:\n sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)\n sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory')\n return dict(\n aws_access_key_id=sts_session['Credentials']['AccessKeyId'],\n aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],\n aws_session_token=sts_session['Credentials']['SessionToken']\n )\n except botocore.exceptions.ClientError as e:\n raise AnsibleError(\"Unable to assume IAM role: %s\" % to_native(e))\n\n def _boto3_conn(self, regions):\n '''\n :param regions: A list of regions to create a boto3 client\n\n Generator that yields a boto3 client and the region\n '''\n iam_role_arn = self.iam_role_arn\n credentials = self.credentials\n for region in regions:\n try:\n if iam_role_arn is not None:\n assumed_credentials = self._boto3_assume_role(credentials, region)\n else:\n assumed_credentials = credentials\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n if self.boto_profile:\n try:\n connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)\n except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n else:\n raise AnsibleError(\"Insufficient credentials found: %s\" % to_native(e))\n yield connection, region\n\n def _get_hosts_by_region(self, connection, filters, strict):\n\n def _add_tags_for_hosts(connection, hosts, strict):\n for host in hosts:\n if 'DBInstanceArn' in host:\n resource_arn = host['DBInstanceArn']\n else:\n resource_arn = host['DBClusterArn']\n\n try:\n tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']\n except is_boto3_error_code('AccessDenied') as e:\n if not strict:\n tags = []\n else:\n raise e\n host['Tags'] = tags\n\n def wrapper(f, *args, **kwargs):\n try:\n results = f(*args, **kwargs)\n if 'DBInstances' in results:\n results = results['DBInstances']\n else:\n results = results['DBClusters']\n _add_tags_for_hosts(connection, results, strict)\n except 
is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except\n if not strict:\n results = []\n else:\n raise AnsibleError(\"Failed to query RDS: {0}\".format(to_native(e)))\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except\n raise AnsibleError(\"Failed to query RDS: {0}\".format(to_native(e)))\n return results\n return wrapper\n\n def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):\n '''\n :param regions: a list of regions in which to describe hosts\n :param instance_filters: a list of boto3 filter dictionaries\n :param cluster_filters: a list of boto3 filter dictionaries\n :param strict: a boolean determining whether to fail or ignore 403 error codes\n :param statuses: a list of statuses that the returned hosts should match\n :return A list of host dictionaries\n '''\n all_instances = []\n all_clusters = []\n for connection, _region in self._boto3_conn(regions):\n paginator = connection.get_paginator('describe_db_instances')\n all_instances.extend(\n self._get_hosts_by_region(connection, instance_filters, strict)\n (paginator.paginate(Filters=instance_filters).build_full_result)\n )\n if gather_clusters:\n all_clusters.extend(\n self._get_hosts_by_region(connection, cluster_filters, strict)\n (connection.describe_db_clusters, **{'Filters': cluster_filters})\n )\n sorted_hosts = list(\n sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +\n sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])\n )\n return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)\n\n def find_hosts_with_valid_statuses(self, hosts, statuses):\n if 'all' in statuses:\n return hosts\n valid_hosts = []\n for host in hosts:\n if host.get('DBInstanceStatus') in statuses:\n valid_hosts.append(host)\n elif host.get('Status') in statuses:\n valid_hosts.append(host)\n return valid_hosts\n\n def _populate(self, hosts):\n group = 'aws_rds'\n self.inventory.add_group(group)\n if hosts:\n self._add_hosts(hosts=hosts, group=group)\n self.inventory.add_child('all', group)\n\n def _populate_from_source(self, source_data):\n hostvars = source_data.pop('_meta', {}).get('hostvars', {})\n for group in source_data:\n if group == 'all':\n continue\n else:\n self.inventory.add_group(group)\n hosts = source_data[group].get('hosts', [])\n for host in hosts:\n self._populate_host_vars([host], hostvars.get(host, {}), group)\n self.inventory.add_child('all', group)\n\n def _get_hostname(self, host):\n if host.get('DBInstanceIdentifier'):\n return host['DBInstanceIdentifier']\n else:\n return host['DBClusterIdentifier']\n\n def _format_inventory(self, hosts):\n results = {'_meta': {'hostvars': {}}}\n group = 'aws_rds'\n results[group] = {'hosts': []}\n for host in hosts:\n hostname = self._get_hostname(host)\n results[group]['hosts'].append(hostname)\n h = self.inventory.get_host(hostname)\n results['_meta']['hostvars'][h.name] = h.vars\n return results\n\n def _add_hosts(self, hosts, group):\n '''\n :param hosts: a list of hosts to be added to a group\n :param group: the name of the group to which the hosts belong\n '''\n for host in hosts:\n hostname = self._get_hostname(host)\n host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])\n host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))\n\n # Allow easier grouping by region\n if 'availability_zone' in host:\n host['region'] = host['availability_zone'][:-1]\n elif 'availability_zones' in host:\n 
host['region'] = host['availability_zones'][0][:-1]\n\n self.inventory.add_host(hostname, group=group)\n hostvars_prefix = self.get_option(\"hostvars_prefix\")\n hostvars_suffix = self.get_option(\"hostvars_suffix\")\n new_vars = dict()\n for hostvar, hostval in host.items():\n if hostvars_prefix:\n hostvar = hostvars_prefix + hostvar\n if hostvars_suffix:\n hostvar = hostvar + hostvars_suffix\n new_vars[hostvar] = hostval\n self.inventory.set_variable(hostname, hostvar, hostval)\n host.update(new_vars)\n\n # Use constructed if applicable\n strict = self.get_option('strict')\n # Composed variables\n self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)\n # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group\n self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)\n # Create groups based on variable values and add the corresponding hosts to it\n self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)\n\n def _set_credentials(self):\n '''\n '''\n self.boto_profile = self.get_option('aws_profile')\n aws_access_key_id = self.get_option('aws_access_key')\n aws_secret_access_key = self.get_option('aws_secret_key')\n aws_security_token = self.get_option('aws_security_token')\n self.iam_role_arn = self.get_option('iam_role_arn')\n\n if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):\n session = botocore.session.get_session()\n if session.get_credentials() is not None:\n aws_access_key_id = session.get_credentials().access_key\n aws_secret_access_key = session.get_credentials().secret_key\n aws_security_token = session.get_credentials().token\n\n if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):\n raise AnsibleError(\"Insufficient boto credentials found. 
Please provide them in your \"\n \"inventory configuration file or set them as environment variables.\")\n\n if aws_access_key_id:\n self.credentials['aws_access_key_id'] = aws_access_key_id\n if aws_secret_access_key:\n self.credentials['aws_secret_access_key'] = aws_secret_access_key\n if aws_security_token:\n self.credentials['aws_session_token'] = aws_security_token\n\n def verify_file(self, path):\n '''\n :param loader: an ansible.parsing.dataloader.DataLoader object\n :param path: the path to the inventory config file\n :return the contents of the config file\n '''\n if super(InventoryModule, self).verify_file(path):\n if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):\n return True\n return False\n\n def parse(self, inventory, loader, path, cache=True):\n super(InventoryModule, self).parse(inventory, loader, path)\n\n if not HAS_BOTO3:\n raise AnsibleError(missing_required_lib('botocore and boto3'))\n\n self._read_config_data(path)\n self._set_credentials()\n\n # get user specifications\n regions = self.get_option('regions')\n filters = self.get_option('filters')\n strict_permissions = self.get_option('strict_permissions')\n statuses = self.get_option('statuses')\n include_clusters = self.get_option('include_clusters')\n instance_filters = ansible_dict_to_boto3_filter_list(filters)\n cluster_filters = []\n if 'db-cluster-id' in filters and include_clusters:\n cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})\n\n cache_key = self.get_cache_key(path)\n # false when refresh_cache or --flush-cache is used\n if cache:\n # get the user-specified directive\n cache = self.get_option('cache')\n\n # Generate inventory\n formatted_inventory = {}\n cache_needs_update = False\n if cache:\n try:\n results = self._cache[cache_key]\n except KeyError:\n # if cache expires or cache file doesn't exist\n cache_needs_update = True\n else:\n self._populate_from_source(results)\n\n if not cache or cache_needs_update:\n results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)\n self._populate(results)\n formatted_inventory = self._format_inventory(results)\n\n # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used\n # when the user is using caching, update the cached inventory\n if cache_needs_update or (not cache and self.get_option('cache')):\n self._cache[cache_key] = formatted_inventory\n", "path": "plugins/inventory/aws_rds.py" } ]
diff --git a/changelogs/fragments/1304-aws_rds-config.yml b/changelogs/fragments/1304-aws_rds-config.yml
new file mode 100644
index 00000000000..bd32f37f82b
--- /dev/null
+++ b/changelogs/fragments/1304-aws_rds-config.yml
@@ -0,0 +1,2 @@
+bugfixes:
+- aws_rds - fixes bug in RDS inventory plugin where config file was ignored (https://github.com/ansible-collections/amazon.aws/issues/1304).
diff --git a/plugins/inventory/aws_rds.py b/plugins/inventory/aws_rds.py
index e03464168f7..02f86073a0a 100644
--- a/plugins/inventory/aws_rds.py
+++ b/plugins/inventory/aws_rds.py
@@ -360,6 +360,7 @@ def parse(self, inventory, loader, path, cache=True):
         if not HAS_BOTO3:
             raise AnsibleError(missing_required_lib('botocore and boto3'))
 
+        self._read_config_data(path)
         self._set_credentials()
 
         # get user specifications
PlasmaPy__PlasmaPy-2506
Fix linkcheck GitHub Action along with minor updates to workflows

#2490 ended up disabling the workflow dispatch option for doing a linkcheck. This PR reverts some of #2490 so that the linkcheck workflow does not attempt to run in PRs, and instead adds a workflow step for CI that does the linkcheck.

I also took a moment to make the formatting of workflow files a bit more consistent, as well as some other minor updates.
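For readers unfamiliar with the check being discussed: "linkcheck" refers to validating the external hyperlinks in the Sphinx documentation. A rough local equivalent of what such a CI step runs is sketched below; Sphinx's linkcheck builder is real, but the docs paths and the exact invocation are my assumptions and are not taken from the PlasmaPy workflow files.

```python
# Rough, assumed local equivalent of the CI linkcheck step; the docs/ paths are
# guesses. Sphinx's "linkcheck" builder visits every external URL referenced in
# the documentation and reports links that are broken or permanently redirected.
import subprocess

subprocess.run(
    ["sphinx-build", "-b", "linkcheck", "docs", "docs/_build/linkcheck"],
    check=True,
)
```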
[ { "content": "\"\"\"\nVarious decorators to validate input/output arguments to functions.\n\"\"\"\n\n__all__ = [\"validate_class_attributes\", \"validate_quantities\", \"ValidateQuantities\"]\n\nimport functools\nimport inspect\nimport warnings\nfrom collections.abc import Iterable\nfrom typing import Any\n\nimport astropy.units as u\n\nfrom plasmapy.utils.decorators.checks import CheckUnits, CheckValues\nfrom plasmapy.utils.decorators.helpers import preserve_signature\n\n\nclass ValidateQuantities(CheckUnits, CheckValues):\n \"\"\"\n A decorator class to 'validate' -- control and convert -- the units and values\n of input and return arguments to a function or method. Arguments are expected to\n be astropy :class:`~astropy.units.quantity.Quantity` objects.\n\n Parameters\n ----------\n validations_on_return: dictionary of validation specifications\n Specifications for unit and value validations on the return of the\n function being wrapped. (see `quantity validations`_ for valid\n specifications.\n\n **validations: dictionary of validation specifications\n Specifications for unit and value validations on the input arguments of the\n function being wrapped. Each keyword argument in ``validations`` is the\n name of a function argument to be validated and the keyword value contains\n the unit and value validation specifications.\n\n .. _`quantity validations`:\n\n Unit and value validations can be defined by passing one of the astropy\n :mod:`~astropy.units`, a list of astropy units, or a dictionary containing\n the keys defined below. Units can also be defined with function annotations,\n but must be consistent with decorator ``**validations`` arguments if used\n concurrently. If a key is omitted, then the default value will be assumed.\n\n ====================== ======= ================================================\n Key Type Description\n ====================== ======= ================================================\n units list of desired astropy :mod:`~astropy.units`\n equivalencies | [DEFAULT `None`] A list of equivalent pairs to\n try if\n | the units are not directly convertible.\n | (see :mod:`~astropy.units.equivalencies`,\n and/or `astropy equivalencies`_)\n pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units\n | to pass\n can_be_negative `bool` [DEFAULT `True`] values can be negative\n can_be_complex `bool` [DEFAULT `False`] values can be complex numbers\n can_be_inf `bool` [DEFAULT `True`] values can be :data:`~numpy.inf`\n can_be_nan `bool` [DEFAULT `True`] values can be :data:`~numpy.nan`\n none_shall_pass `bool` [DEFAULT `False`] values can be a python `None`\n can_be_zero `bool` [DEFAULT `True`] values can be zero\n ====================== ======= ================================================\n\n Notes\n -----\n * Validation of function arguments ``*args`` and ``**kwargs`` is not supported.\n * `None` values will pass when `None` is included in the list of specified units,\n is set as a default value for the function argument, or ``none_shall_pass`` is\n set to `True`. 
If ``none_shall_pass`` is doubly/triply defined through the\n mentioned options, then they all must be consistent with each other.\n * If units are not specified in ``validations``, then the decorator will attempt\n to identify desired units by examining the function annotations.\n\n Examples\n --------\n Define unit and value validations with decorator parameters::\n\n import astropy.units as u\n from plasmapy.utils.decorators import ValidateQuantities\n\n\n @ValidateQuantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def foo(mass, vel):\n return mass * vel\n\n\n # on a method\n class Foo:\n @ValidateQuantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def bar(self, mass, vel):\n return mass * vel\n\n Define units with function annotations::\n\n @ValidateQuantities(mass={\"can_be_negative\": False})\n def foo(mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n\n # on a method\n class Foo:\n @ValidateQuantities(mass={\"can_be_negative\": False})\n def bar(self, mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n Allow `None` values to pass::\n\n @ValidateQuantities(checks_on_return=[u.cm, None])\n def foo(arg1: u.cm = None):\n return arg1\n\n Allow return values to have equivalent units::\n\n @ValidateQuantities(\n arg1={\"units\": u.cm},\n checks_on_return={\"units\": u.km, \"pass_equivalent_units\": True},\n )\n def foo(arg1):\n return arg1\n\n Allow equivalent units to pass with specified equivalencies::\n\n @ValidateQuantities(\n arg1={\n \"units\": u.K,\n \"equivalencies\": u.temperature(),\n \"pass_equivalent_units\": True,\n }\n )\n def foo(arg1):\n return arg1\n\n .. 
_astropy equivalencies:\n https://docs.astropy.org/en/stable/units/equivalencies.html\n \"\"\"\n\n def __init__(\n self, validations_on_return=None, **validations: dict[str, Any]\n ) -> None:\n if \"checks_on_return\" in validations:\n raise TypeError(\n \"keyword argument 'checks_on_return' is not allowed, \"\n \"use 'validations_on_return' to set validations \"\n \"on the return variable\"\n )\n\n self._validations = validations\n\n checks = validations.copy()\n if validations_on_return is not None:\n self._validations[\"validations_on_return\"] = validations_on_return\n checks[\"checks_on_return\"] = validations_on_return\n\n super().__init__(**checks)\n\n def __call__(self, f):\n \"\"\"\n Decorate a function.\n\n Parameters\n ----------\n f\n Function to be wrapped\n\n Returns\n -------\n function\n wrapped function of ``f``\n \"\"\"\n self.f = f\n wrapped_sign = inspect.signature(f)\n\n @preserve_signature\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n # combine args and kwargs into dictionary\n bound_args = wrapped_sign.bind(*args, **kwargs)\n bound_args.apply_defaults()\n\n # get conditioned validations\n validations = self._get_validations(bound_args)\n\n # validate (input) argument units and values\n for arg_name in validations:\n # skip check of output/return\n if arg_name == \"validations_on_return\":\n continue\n\n # validate argument & update for conversion\n arg = self._validate_quantity(\n bound_args.arguments[arg_name], arg_name, validations[arg_name]\n )\n bound_args.arguments[arg_name] = arg\n\n # call function\n _return = f(**bound_args.arguments)\n\n # validate output\n if \"validations_on_return\" in validations:\n _return = self._validate_quantity(\n _return,\n \"validations_on_return\",\n validations[\"validations_on_return\"],\n )\n\n return _return\n\n return wrapper\n\n def _get_validations(\n self, bound_args: inspect.BoundArguments\n ) -> dict[str, dict[str, Any]]:\n \"\"\"\n Review :attr:`validations` and function bound arguments to build a complete\n 'validations' dictionary. If a validation key is omitted from the argument\n validations, then a default value is assumed (see `quantity validations`_).\n\n Parameters\n ----------\n bound_args: :class:`inspect.BoundArguments`\n arguments passed into the function being wrapped\n\n .. 
code-block:: python\n\n bound_args = inspect.signature(f).bind(*args, **kwargs)\n\n Returns\n -------\n Dict[str, Dict[str, Any]]\n A complete 'validations' dictionary for validating function input arguments\n and return.\n \"\"\"\n unit_checks = self._get_unit_checks(bound_args)\n value_checks = self._get_value_checks(bound_args)\n\n # combine all validations\n # * `unit_checks` will encompass all argument \"checks\" defined either by\n # function annotations or **validations.\n # * `value_checks` may miss some arguments if **validations only defines\n # unit validations or some validations come from function annotations\n validations = unit_checks.copy()\n for arg_name in validations:\n # augment 'none_shall_pass' (if needed)\n try:\n # if 'none_shall_pass' was in the original passed-in validations,\n # then override the value determined by CheckUnits\n _none_shall_pass = self.validations[arg_name][\"none_shall_pass\"]\n # if validations[arg_name]['none_shall_pass'] != _none_shall_pass:\n if (\n _none_shall_pass is False\n and validations[arg_name][\"none_shall_pass\"] is True\n ):\n raise ValueError(\n f\"Validation 'none_shall_pass' for argument '{arg_name}' is \"\n f\"inconsistent between function annotations \"\n f\"({validations[arg_name]['none_shall_pass']}) and decorator \"\n f\"argument ({_none_shall_pass}).\"\n )\n validations[arg_name][\"none_shall_pass\"] = _none_shall_pass\n except (KeyError, TypeError):\n # 'none_shall_pass' was not in the original passed-in validations, so\n # rely on the value determined by CheckUnits\n pass\n finally:\n try:\n del value_checks[arg_name][\"none_shall_pass\"]\n except KeyError:\n dvc = self._CheckValues__check_defaults.copy()\n del dvc[\"none_shall_pass\"]\n value_checks[arg_name] = dvc\n\n # update the validations dictionary\n validations[arg_name].update(value_checks[arg_name])\n\n if \"checks_on_return\" in validations:\n validations[\"validations_on_return\"] = validations.pop(\"checks_on_return\")\n\n return validations\n\n def _validate_quantity( # noqa: C901\n self,\n arg,\n arg_name: str,\n arg_validations: dict[str, Any],\n ):\n \"\"\"\n Perform validations `arg_validations` on function argument `arg`\n named `arg_name`.\n\n Parameters\n ----------\n arg\n The argument to be validated.\n\n arg_name: str\n The name of the argument to be validated\n\n arg_validations: Dict[str, Any]\n The requested validations for the argument\n\n Raises\n ------\n TypeError\n if argument is not an Astropy :class:`~astropy.units.Quantity`\n or not convertible to a :class:`~astropy.units.Quantity`\n\n ValueError\n if validations fail\n \"\"\"\n # rename to work with \"check\" methods\n if arg_name == \"validations_on_return\":\n arg_name = \"checks_on_return\"\n\n # initialize str for error message\n if arg_name == \"checks_on_return\":\n err_msg = \"The return value \"\n else:\n err_msg = f\"The argument '{arg_name}' \"\n err_msg += f\"to function {self.f.__name__}()\"\n\n # initialize TypeError message\n typeerror_msg = (\n f\"{err_msg} should be an astropy Quantity with units\"\n f\" equivalent to one of [\"\n )\n for ii, unit in enumerate(arg_validations[\"units\"]):\n typeerror_msg += f\"{unit}\"\n\n if ii != len(arg_validations[\"units\"]) - 1:\n typeerror_msg += \", \"\n typeerror_msg += \"]\"\n\n # add units to arg if possible\n # * a None value will be taken care of by `_check_unit_core`\n #\n if arg is None or hasattr(arg, \"unit\"):\n pass\n elif len(arg_validations[\"units\"]) != 1:\n raise TypeError(typeerror_msg)\n else:\n try:\n arg = 
arg * arg_validations[\"units\"][0]\n except (TypeError, ValueError) as ex:\n raise TypeError(typeerror_msg) from ex\n else:\n warnings.warn(\n u.UnitsWarning(\n f\"{err_msg} has no specified units. Assuming units of \"\n f\"{arg_validations['units'][0]}. To silence this warning, \"\n f\"explicitly pass in an astropy Quantity \"\n f\"(e.g. 5. * astropy.units.cm) \"\n f\"(see http://docs.astropy.org/en/stable/units/)\"\n )\n )\n\n # check units\n arg, unit, equiv, err = self._check_unit_core(arg, arg_name, arg_validations)\n\n # convert quantity\n if (\n arg is not None\n and unit is not None\n and not arg_validations[\"pass_equivalent_units\"]\n ):\n arg = arg.to(unit, equivalencies=equiv)\n elif err is not None:\n raise err\n\n self._check_value(arg, arg_name, arg_validations)\n\n return arg\n\n @property\n def validations(self):\n \"\"\"\n Requested validations on the decorated function's input arguments and\n return variable.\n \"\"\"\n return self._validations\n\n\ndef validate_quantities(func=None, validations_on_return=None, **validations):\n \"\"\"\n A decorator to 'validate' — control and convert — the units and values\n of input and return arguments to a function or method. Arguments are expected to\n be astropy :class:`~astropy.units.quantity.Quantity` objects.\n\n Parameters\n ----------\n func:\n The function to be decorated\n\n validations_on_return: dictionary of validation specifications\n Specifications for unit and value validations on the return of the\n function being wrapped. (see `quantity validations`_ for valid\n specifications.\n\n **validations: dictionary of validation specifications\n Specifications for unit and value validations on the input arguments of the\n function being wrapped. Each keyword argument in ``validations`` is the\n name of a function argument to be validated and the keyword value contains\n the unit and value validation specifications.\n\n .. _`quantity validations`:\n\n Unit and value validations can be defined by passing one of the astropy\n :mod:`~astropy.units`, a list of astropy units, or a dictionary containing\n the keys defined below. Units can also be defined with function annotations,\n but must be consistent with decorator ``**validations`` arguments if used\n concurrently. 
If a key is omitted, then the default value will be assumed.\n\n ====================== ======= ================================================\n Key Type Description\n ====================== ======= ================================================\n units list of desired astropy :mod:`~astropy.units`\n equivalencies | [DEFAULT `None`] A list of equivalent pairs to\n try if\n | the units are not directly convertible.\n | (see :mod:`~astropy.units.equivalencies`,\n and/or `astropy equivalencies`_)\n pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units\n | to pass\n can_be_negative `bool` [DEFAULT `True`] values can be negative\n can_be_complex `bool` [DEFAULT `False`] values can be complex numbers\n can_be_inf `bool` [DEFAULT `True`] values can be :data:`~numpy.inf`\n can_be_nan `bool` [DEFAULT `True`] values can be :data:`~numpy.nan`\n none_shall_pass `bool` [DEFAULT `False`] values can be a python `None`\n can_be_zero `bool` [DEFAULT `True`] values can be zero\n ====================== ======= ================================================\n\n Notes\n -----\n * Validation of function arguments ``*args`` and ``**kwargs`` is not supported.\n * `None` values will pass when `None` is included in the list of specified units,\n is set as a default value for the function argument, or ``none_shall_pass`` is\n set to `True`. If ``none_shall_pass`` is doubly/triply defined through the\n mentioned options, then they all must be consistent with each other.\n * If units are not specified in ``validations``, then the decorator will attempt\n to identify desired units by examining the function annotations.\n * Full functionality is defined by the class :class:`ValidateQuantities`.\n\n Examples\n --------\n Define unit and value validations with decorator parameters::\n\n import astropy.units as u\n from plasmapy.utils.decorators import validate_quantities\n\n\n @validate_quantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def foo(mass, vel):\n return mass * vel\n\n\n # on a method\n class Foo:\n @validate_quantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def bar(self, mass, vel):\n return mass * vel\n\n Define units with function annotations::\n\n @validate_quantities(mass={\"can_be_negative\": False})\n def foo(mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n\n # rely only on annotations\n @validate_quantities\n def foo(x: u.cm, time: u.s) -> u.cm / u.s:\n return x / time\n\n\n # on a method\n class Foo:\n @validate_quantities(mass={\"can_be_negative\": False})\n def bar(self, mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n Define units using type hint annotations::\n\n @validate_quantities\n def foo(x: u.Quantity[u.m], time: u.Quantity[u.s]) -> u.Quantity[u.m / u.s]:\n return x / time\n\n Allow `None` values to pass::\n\n @validate_quantities(arg2={\"none_shall_pass\": True}, checks_on_return=[u.cm, None])\n def foo(arg1: u.cm, arg2: u.cm = None):\n return None\n\n Allow return values to have equivalent units::\n\n @validate_quantities(\n arg1={\"units\": u.cm},\n checks_on_return={\"units\": u.km, \"pass_equivalent_units\": True},\n )\n def foo(arg1):\n return arg1\n\n Allow equivalent units to pass with specified equivalencies::\n\n @validate_quantities(\n arg1={\n \"units\": u.K,\n \"equivalencies\": u.temperature(),\n 
\"pass_equivalent_units\": True,\n }\n )\n def foo(arg1):\n return arg1\n\n .. _astropy equivalencies:\n https://docs.astropy.org/en/stable/units/equivalencies.html\n \"\"\"\n\n if validations_on_return is not None:\n validations[\"validations_on_return\"] = validations_on_return\n\n if func is not None:\n # `validate_quantities` called as a function\n return ValidateQuantities(**validations)(func)\n\n # `validate_quantities` called as a decorator \"sugar-syntax\"\n return ValidateQuantities(**validations)\n\n\ndef get_attributes_not_provided(\n self,\n expected_attributes: list[str] | None = None,\n both_or_either_attributes: list[Iterable[str]] | None = None,\n mutually_exclusive_attributes: list[Iterable[str]] | None = None,\n):\n \"\"\"\n Collect attributes that weren't provided during instantiation needed\n to access a method.\n \"\"\"\n\n attributes_not_provided = []\n\n if expected_attributes is not None:\n attributes_not_provided.extend(\n attribute\n for attribute in expected_attributes\n if getattr(self, attribute) is None\n )\n if both_or_either_attributes is not None:\n for attribute_tuple in both_or_either_attributes:\n number_of_attributes_provided = sum(\n getattr(self, attribute) is not None for attribute in attribute_tuple\n )\n if number_of_attributes_provided == 0:\n attributes_not_provided.append(\n f\"at least one of {' or '.join(attribute_tuple)}\"\n )\n\n if mutually_exclusive_attributes is not None:\n for attribute_tuple in mutually_exclusive_attributes:\n number_of_attributes_provided = sum(\n getattr(self, attribute) is not None for attribute in attribute_tuple\n )\n if number_of_attributes_provided != 1:\n attributes_not_provided.append(\n f\"exactly one of {' or '.join(attribute_tuple)}\"\n )\n\n return attributes_not_provided\n\n\ndef validate_class_attributes(\n expected_attributes: list[str] | None = None,\n both_or_either_attributes: list[Iterable[str]] | None = None,\n mutually_exclusive_attributes: list[Iterable[str]] | None = None,\n):\n \"\"\"\n A decorator responsible for raising errors if the expected arguments weren't\n provided during class instantiation.\n \"\"\"\n\n def decorator(attribute):\n def wrapper(self, *args, **kwargs):\n arguments_not_provided = get_attributes_not_provided(\n self,\n expected_attributes,\n both_or_either_attributes,\n mutually_exclusive_attributes,\n )\n\n if len(arguments_not_provided) > 0:\n raise ValueError(\n f\"{attribute.__name__} expected the following \"\n f\"additional arguments: {', '.join(arguments_not_provided)}\"\n )\n\n return attribute(self, *args, **kwargs)\n\n return wrapper\n\n return decorator\n", "path": "src/plasmapy/utils/decorators/validators.py" } ]
[ { "content": "\"\"\"\nVarious decorators to validate input/output arguments to functions.\n\"\"\"\n\n__all__ = [\"validate_class_attributes\", \"validate_quantities\", \"ValidateQuantities\"]\n\nimport functools\nimport inspect\nimport warnings\nfrom collections.abc import Iterable\nfrom typing import Any\n\nimport astropy.units as u\n\nfrom plasmapy.utils.decorators.checks import CheckUnits, CheckValues\nfrom plasmapy.utils.decorators.helpers import preserve_signature\n\n\nclass ValidateQuantities(CheckUnits, CheckValues):\n \"\"\"\n A decorator class to 'validate' -- control and convert -- the units and values\n of input and return arguments to a function or method. Arguments are expected to\n be astropy :class:`~astropy.units.quantity.Quantity` objects.\n\n Parameters\n ----------\n validations_on_return: dictionary of validation specifications\n Specifications for unit and value validations on the return of the\n function being wrapped. (see `quantity validations`_ for valid\n specifications.\n\n **validations: dictionary of validation specifications\n Specifications for unit and value validations on the input arguments of the\n function being wrapped. Each keyword argument in ``validations`` is the\n name of a function argument to be validated and the keyword value contains\n the unit and value validation specifications.\n\n .. _`quantity validations`:\n\n Unit and value validations can be defined by passing one of the astropy\n :mod:`~astropy.units`, a list of astropy units, or a dictionary containing\n the keys defined below. Units can also be defined with function annotations,\n but must be consistent with decorator ``**validations`` arguments if used\n concurrently. If a key is omitted, then the default value will be assumed.\n\n ====================== ======= ================================================\n Key Type Description\n ====================== ======= ================================================\n units list of desired astropy :mod:`~astropy.units`\n equivalencies | [DEFAULT `None`] A list of equivalent pairs to\n try if\n | the units are not directly convertible.\n | (see :mod:`~astropy.units.equivalencies`,\n and/or `astropy equivalencies`_)\n pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units\n | to pass\n can_be_negative `bool` [DEFAULT `True`] values can be negative\n can_be_complex `bool` [DEFAULT `False`] values can be complex numbers\n can_be_inf `bool` [DEFAULT `True`] values can be :data:`~numpy.inf`\n can_be_nan `bool` [DEFAULT `True`] values can be :data:`~numpy.nan`\n none_shall_pass `bool` [DEFAULT `False`] values can be a python `None`\n can_be_zero `bool` [DEFAULT `True`] values can be zero\n ====================== ======= ================================================\n\n Notes\n -----\n * Validation of function arguments ``*args`` and ``**kwargs`` is not supported.\n * `None` values will pass when `None` is included in the list of specified units,\n is set as a default value for the function argument, or ``none_shall_pass`` is\n set to `True`. 
If ``none_shall_pass`` is doubly/triply defined through the\n mentioned options, then they all must be consistent with each other.\n * If units are not specified in ``validations``, then the decorator will attempt\n to identify desired units by examining the function annotations.\n\n Examples\n --------\n Define unit and value validations with decorator parameters::\n\n import astropy.units as u\n from plasmapy.utils.decorators import ValidateQuantities\n\n\n @ValidateQuantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def foo(mass, vel):\n return mass * vel\n\n\n # on a method\n class Foo:\n @ValidateQuantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def bar(self, mass, vel):\n return mass * vel\n\n Define units with function annotations::\n\n @ValidateQuantities(mass={\"can_be_negative\": False})\n def foo(mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n\n # on a method\n class Foo:\n @ValidateQuantities(mass={\"can_be_negative\": False})\n def bar(self, mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n Allow `None` values to pass::\n\n @ValidateQuantities(checks_on_return=[u.cm, None])\n def foo(arg1: u.cm = None):\n return arg1\n\n Allow return values to have equivalent units::\n\n @ValidateQuantities(\n arg1={\"units\": u.cm},\n checks_on_return={\"units\": u.km, \"pass_equivalent_units\": True},\n )\n def foo(arg1):\n return arg1\n\n Allow equivalent units to pass with specified equivalencies::\n\n @ValidateQuantities(\n arg1={\n \"units\": u.K,\n \"equivalencies\": u.temperature(),\n \"pass_equivalent_units\": True,\n }\n )\n def foo(arg1):\n return arg1\n\n .. 
_astropy equivalencies:\n https://docs.astropy.org/en/stable/units/equivalencies.html\n \"\"\"\n\n def __init__(\n self, validations_on_return=None, **validations: dict[str, Any]\n ) -> None:\n if \"checks_on_return\" in validations:\n raise TypeError(\n \"keyword argument 'checks_on_return' is not allowed, \"\n \"use 'validations_on_return' to set validations \"\n \"on the return variable\"\n )\n\n self._validations = validations\n\n checks = validations.copy()\n if validations_on_return is not None:\n self._validations[\"validations_on_return\"] = validations_on_return\n checks[\"checks_on_return\"] = validations_on_return\n\n super().__init__(**checks)\n\n def __call__(self, f):\n \"\"\"\n Decorate a function.\n\n Parameters\n ----------\n f\n Function to be wrapped\n\n Returns\n -------\n function\n wrapped function of ``f``\n \"\"\"\n self.f = f\n wrapped_sign = inspect.signature(f, eval_str=True)\n\n @preserve_signature\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n # combine args and kwargs into dictionary\n bound_args = wrapped_sign.bind(*args, **kwargs)\n bound_args.apply_defaults()\n\n # get conditioned validations\n validations = self._get_validations(bound_args)\n\n # validate (input) argument units and values\n for arg_name in validations:\n # skip check of output/return\n if arg_name == \"validations_on_return\":\n continue\n\n # validate argument & update for conversion\n arg = self._validate_quantity(\n bound_args.arguments[arg_name], arg_name, validations[arg_name]\n )\n bound_args.arguments[arg_name] = arg\n\n # call function\n _return = f(**bound_args.arguments)\n\n # validate output\n if \"validations_on_return\" in validations:\n _return = self._validate_quantity(\n _return,\n \"validations_on_return\",\n validations[\"validations_on_return\"],\n )\n\n return _return\n\n return wrapper\n\n def _get_validations(\n self, bound_args: inspect.BoundArguments\n ) -> dict[str, dict[str, Any]]:\n \"\"\"\n Review :attr:`validations` and function bound arguments to build a complete\n 'validations' dictionary. If a validation key is omitted from the argument\n validations, then a default value is assumed (see `quantity validations`_).\n\n Parameters\n ----------\n bound_args: :class:`inspect.BoundArguments`\n arguments passed into the function being wrapped\n\n .. 
code-block:: python\n\n bound_args = inspect.signature(f).bind(*args, **kwargs)\n\n Returns\n -------\n Dict[str, Dict[str, Any]]\n A complete 'validations' dictionary for validating function input arguments\n and return.\n \"\"\"\n unit_checks = self._get_unit_checks(bound_args)\n value_checks = self._get_value_checks(bound_args)\n\n # combine all validations\n # * `unit_checks` will encompass all argument \"checks\" defined either by\n # function annotations or **validations.\n # * `value_checks` may miss some arguments if **validations only defines\n # unit validations or some validations come from function annotations\n validations = unit_checks.copy()\n for arg_name in validations:\n # augment 'none_shall_pass' (if needed)\n try:\n # if 'none_shall_pass' was in the original passed-in validations,\n # then override the value determined by CheckUnits\n _none_shall_pass = self.validations[arg_name][\"none_shall_pass\"]\n # if validations[arg_name]['none_shall_pass'] != _none_shall_pass:\n if (\n _none_shall_pass is False\n and validations[arg_name][\"none_shall_pass\"] is True\n ):\n raise ValueError(\n f\"Validation 'none_shall_pass' for argument '{arg_name}' is \"\n f\"inconsistent between function annotations \"\n f\"({validations[arg_name]['none_shall_pass']}) and decorator \"\n f\"argument ({_none_shall_pass}).\"\n )\n validations[arg_name][\"none_shall_pass\"] = _none_shall_pass\n except (KeyError, TypeError):\n # 'none_shall_pass' was not in the original passed-in validations, so\n # rely on the value determined by CheckUnits\n pass\n finally:\n try:\n del value_checks[arg_name][\"none_shall_pass\"]\n except KeyError:\n dvc = self._CheckValues__check_defaults.copy()\n del dvc[\"none_shall_pass\"]\n value_checks[arg_name] = dvc\n\n # update the validations dictionary\n validations[arg_name].update(value_checks[arg_name])\n\n if \"checks_on_return\" in validations:\n validations[\"validations_on_return\"] = validations.pop(\"checks_on_return\")\n\n return validations\n\n def _validate_quantity( # noqa: C901\n self,\n arg,\n arg_name: str,\n arg_validations: dict[str, Any],\n ):\n \"\"\"\n Perform validations `arg_validations` on function argument `arg`\n named `arg_name`.\n\n Parameters\n ----------\n arg\n The argument to be validated.\n\n arg_name: str\n The name of the argument to be validated\n\n arg_validations: Dict[str, Any]\n The requested validations for the argument\n\n Raises\n ------\n TypeError\n if argument is not an Astropy :class:`~astropy.units.Quantity`\n or not convertible to a :class:`~astropy.units.Quantity`\n\n ValueError\n if validations fail\n \"\"\"\n # rename to work with \"check\" methods\n if arg_name == \"validations_on_return\":\n arg_name = \"checks_on_return\"\n\n # initialize str for error message\n if arg_name == \"checks_on_return\":\n err_msg = \"The return value \"\n else:\n err_msg = f\"The argument '{arg_name}' \"\n err_msg += f\"to function {self.f.__name__}()\"\n\n # initialize TypeError message\n typeerror_msg = (\n f\"{err_msg} should be an astropy Quantity with units\"\n f\" equivalent to one of [\"\n )\n for ii, unit in enumerate(arg_validations[\"units\"]):\n typeerror_msg += f\"{unit}\"\n\n if ii != len(arg_validations[\"units\"]) - 1:\n typeerror_msg += \", \"\n typeerror_msg += \"]\"\n\n # add units to arg if possible\n # * a None value will be taken care of by `_check_unit_core`\n #\n if arg is None or hasattr(arg, \"unit\"):\n pass\n elif len(arg_validations[\"units\"]) != 1:\n raise TypeError(typeerror_msg)\n else:\n try:\n arg = 
arg * arg_validations[\"units\"][0]\n except (TypeError, ValueError) as ex:\n raise TypeError(typeerror_msg) from ex\n else:\n warnings.warn(\n u.UnitsWarning(\n f\"{err_msg} has no specified units. Assuming units of \"\n f\"{arg_validations['units'][0]}. To silence this warning, \"\n f\"explicitly pass in an astropy Quantity \"\n f\"(e.g. 5. * astropy.units.cm) \"\n f\"(see http://docs.astropy.org/en/stable/units/)\"\n )\n )\n\n # check units\n arg, unit, equiv, err = self._check_unit_core(arg, arg_name, arg_validations)\n\n # convert quantity\n if (\n arg is not None\n and unit is not None\n and not arg_validations[\"pass_equivalent_units\"]\n ):\n arg = arg.to(unit, equivalencies=equiv)\n elif err is not None:\n raise err\n\n self._check_value(arg, arg_name, arg_validations)\n\n return arg\n\n @property\n def validations(self):\n \"\"\"\n Requested validations on the decorated function's input arguments and\n return variable.\n \"\"\"\n return self._validations\n\n\ndef validate_quantities(func=None, validations_on_return=None, **validations):\n \"\"\"\n A decorator to 'validate' — control and convert — the units and values\n of input and return arguments to a function or method. Arguments are expected to\n be astropy :class:`~astropy.units.quantity.Quantity` objects.\n\n Parameters\n ----------\n func:\n The function to be decorated\n\n validations_on_return: dictionary of validation specifications\n Specifications for unit and value validations on the return of the\n function being wrapped. (see `quantity validations`_ for valid\n specifications.\n\n **validations: dictionary of validation specifications\n Specifications for unit and value validations on the input arguments of the\n function being wrapped. Each keyword argument in ``validations`` is the\n name of a function argument to be validated and the keyword value contains\n the unit and value validation specifications.\n\n .. _`quantity validations`:\n\n Unit and value validations can be defined by passing one of the astropy\n :mod:`~astropy.units`, a list of astropy units, or a dictionary containing\n the keys defined below. Units can also be defined with function annotations,\n but must be consistent with decorator ``**validations`` arguments if used\n concurrently. 
If a key is omitted, then the default value will be assumed.\n\n ====================== ======= ================================================\n Key Type Description\n ====================== ======= ================================================\n units list of desired astropy :mod:`~astropy.units`\n equivalencies | [DEFAULT `None`] A list of equivalent pairs to\n try if\n | the units are not directly convertible.\n | (see :mod:`~astropy.units.equivalencies`,\n and/or `astropy equivalencies`_)\n pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units\n | to pass\n can_be_negative `bool` [DEFAULT `True`] values can be negative\n can_be_complex `bool` [DEFAULT `False`] values can be complex numbers\n can_be_inf `bool` [DEFAULT `True`] values can be :data:`~numpy.inf`\n can_be_nan `bool` [DEFAULT `True`] values can be :data:`~numpy.nan`\n none_shall_pass `bool` [DEFAULT `False`] values can be a python `None`\n can_be_zero `bool` [DEFAULT `True`] values can be zero\n ====================== ======= ================================================\n\n Notes\n -----\n * Validation of function arguments ``*args`` and ``**kwargs`` is not supported.\n * `None` values will pass when `None` is included in the list of specified units,\n is set as a default value for the function argument, or ``none_shall_pass`` is\n set to `True`. If ``none_shall_pass`` is doubly/triply defined through the\n mentioned options, then they all must be consistent with each other.\n * If units are not specified in ``validations``, then the decorator will attempt\n to identify desired units by examining the function annotations.\n * Full functionality is defined by the class :class:`ValidateQuantities`.\n\n Examples\n --------\n Define unit and value validations with decorator parameters::\n\n import astropy.units as u\n from plasmapy.utils.decorators import validate_quantities\n\n\n @validate_quantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def foo(mass, vel):\n return mass * vel\n\n\n # on a method\n class Foo:\n @validate_quantities(\n mass={\"units\": u.g, \"can_be_negative\": False},\n vel=u.cm / u.s,\n validations_on_return=[u.g * u.cm / u.s, u.kg * u.m / u.s],\n )\n def bar(self, mass, vel):\n return mass * vel\n\n Define units with function annotations::\n\n @validate_quantities(mass={\"can_be_negative\": False})\n def foo(mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n\n # rely only on annotations\n @validate_quantities\n def foo(x: u.cm, time: u.s) -> u.cm / u.s:\n return x / time\n\n\n # on a method\n class Foo:\n @validate_quantities(mass={\"can_be_negative\": False})\n def bar(self, mass: u.g, vel: u.cm / u.s) -> u.g * u.cm / u.s:\n return mass * vel\n\n Define units using type hint annotations::\n\n @validate_quantities\n def foo(x: u.Quantity[u.m], time: u.Quantity[u.s]) -> u.Quantity[u.m / u.s]:\n return x / time\n\n Allow `None` values to pass::\n\n @validate_quantities(arg2={\"none_shall_pass\": True}, checks_on_return=[u.cm, None])\n def foo(arg1: u.cm, arg2: u.cm = None):\n return None\n\n Allow return values to have equivalent units::\n\n @validate_quantities(\n arg1={\"units\": u.cm},\n checks_on_return={\"units\": u.km, \"pass_equivalent_units\": True},\n )\n def foo(arg1):\n return arg1\n\n Allow equivalent units to pass with specified equivalencies::\n\n @validate_quantities(\n arg1={\n \"units\": u.K,\n \"equivalencies\": u.temperature(),\n 
\"pass_equivalent_units\": True,\n }\n )\n def foo(arg1):\n return arg1\n\n .. _astropy equivalencies:\n https://docs.astropy.org/en/stable/units/equivalencies.html\n \"\"\"\n\n if validations_on_return is not None:\n validations[\"validations_on_return\"] = validations_on_return\n\n if func is not None:\n # `validate_quantities` called as a function\n return ValidateQuantities(**validations)(func)\n\n # `validate_quantities` called as a decorator \"sugar-syntax\"\n return ValidateQuantities(**validations)\n\n\ndef get_attributes_not_provided(\n self,\n expected_attributes: list[str] | None = None,\n both_or_either_attributes: list[Iterable[str]] | None = None,\n mutually_exclusive_attributes: list[Iterable[str]] | None = None,\n):\n \"\"\"\n Collect attributes that weren't provided during instantiation needed\n to access a method.\n \"\"\"\n\n attributes_not_provided = []\n\n if expected_attributes is not None:\n attributes_not_provided.extend(\n attribute\n for attribute in expected_attributes\n if getattr(self, attribute) is None\n )\n if both_or_either_attributes is not None:\n for attribute_tuple in both_or_either_attributes:\n number_of_attributes_provided = sum(\n getattr(self, attribute) is not None for attribute in attribute_tuple\n )\n if number_of_attributes_provided == 0:\n attributes_not_provided.append(\n f\"at least one of {' or '.join(attribute_tuple)}\"\n )\n\n if mutually_exclusive_attributes is not None:\n for attribute_tuple in mutually_exclusive_attributes:\n number_of_attributes_provided = sum(\n getattr(self, attribute) is not None for attribute in attribute_tuple\n )\n if number_of_attributes_provided != 1:\n attributes_not_provided.append(\n f\"exactly one of {' or '.join(attribute_tuple)}\"\n )\n\n return attributes_not_provided\n\n\ndef validate_class_attributes(\n expected_attributes: list[str] | None = None,\n both_or_either_attributes: list[Iterable[str]] | None = None,\n mutually_exclusive_attributes: list[Iterable[str]] | None = None,\n):\n \"\"\"\n A decorator responsible for raising errors if the expected arguments weren't\n provided during class instantiation.\n \"\"\"\n\n def decorator(attribute):\n def wrapper(self, *args, **kwargs):\n arguments_not_provided = get_attributes_not_provided(\n self,\n expected_attributes,\n both_or_either_attributes,\n mutually_exclusive_attributes,\n )\n\n if len(arguments_not_provided) > 0:\n raise ValueError(\n f\"{attribute.__name__} expected the following \"\n f\"additional arguments: {', '.join(arguments_not_provided)}\"\n )\n\n return attribute(self, *args, **kwargs)\n\n return wrapper\n\n return decorator\n", "path": "src/plasmapy/utils/decorators/validators.py" } ]
diff --git a/changelog/2506.bugfix.rst b/changelog/2506.bugfix.rst new file mode 100644 index 0000000000..ed7538f243 --- /dev/null +++ b/changelog/2506.bugfix.rst @@ -0,0 +1,2 @@ +- Enabled |validate_quantities| to be compatible with postponed evaluation of + annotations (see :pep:`563`). (:pr:`2479`) diff --git a/src/plasmapy/utils/decorators/validators.py b/src/plasmapy/utils/decorators/validators.py index 94b89c35df..a5ab247bc7 100644 --- a/src/plasmapy/utils/decorators/validators.py +++ b/src/plasmapy/utils/decorators/validators.py @@ -177,7 +177,7 @@ def __call__(self, f): wrapped function of ``f`` """ self.f = f - wrapped_sign = inspect.signature(f) + wrapped_sign = inspect.signature(f, eval_str=True) @preserve_signature @functools.wraps(f) diff --git a/tests/utils/decorators/test_validate_quantities_annotations.py b/tests/utils/decorators/test_validate_quantities_annotations.py new file mode 100644 index 0000000000..d821718a89 --- /dev/null +++ b/tests/utils/decorators/test_validate_quantities_annotations.py @@ -0,0 +1,17 @@ +"""Test that @validate_quantities works with postponed evaluation of annotations.""" + +from __future__ import annotations + +import astropy.units as u + +from plasmapy.utils.decorators.validators import validate_quantities + + +@validate_quantities # type: ignore[misc] +def annotated_function(mass: u.Quantity[u.g]) -> u.Quantity[u.kg]: + return mass + + +def test_validate_quantities_postponed_annotations() -> None: + result = annotated_function(1 * u.g) + assert result.unit == u.kg
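The key change in the patch above is passing `eval_str=True` to `inspect.signature`: with postponed evaluation of annotations (PEP 563), annotations are stored as strings, and `eval_str=True` (available in Python 3.10+) evaluates them back into objects so the decorator can read the unit annotations. Below is a minimal sketch of that difference, assuming astropy is installed; the function name and units are illustrative, not an excerpt from the patch:

```python
from __future__ import annotations  # postponed evaluation: annotations become strings

import inspect

import astropy.units as u


def foo(mass: u.Quantity[u.g]) -> u.Quantity[u.kg]:
    return mass.to(u.kg)


# Without eval_str, the annotation is the literal string "u.Quantity[u.g]",
# which unit-validation code cannot interpret.
print(repr(inspect.signature(foo).parameters["mass"].annotation))

# With eval_str=True (Python >= 3.10), the string is evaluated back into the
# actual annotation object, which is what the patched decorator relies on.
print(repr(inspect.signature(foo, eval_str=True).parameters["mass"].annotation))
```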
getmoto__moto-1286
Resources mentioned in a CloudFormation template are not getting created. Hi, I am creating a security group through a CloudFormation template and then trying to retrieve it through the boto client, but it says that the security group does not exist. If I create the security group through the command line, I am able to fetch it. It seems like resources in the CloudFormation template do not get created when we deploy it.
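A minimal sketch of the reported behavior, assuming the `mock_cloudformation`/`mock_ec2` decorators from moto releases of that era; the template body, stack name, group name, and region below are hypothetical, since the report does not include them:

```python
import json

import boto3
from moto import mock_cloudformation, mock_ec2  # decorator names from moto releases of this era

# Hypothetical template: the report does not include one.
TEMPLATE = {
    "Resources": {
        "MySecurityGroup": {
            "Type": "AWS::EC2::SecurityGroup",
            "Properties": {
                "GroupDescription": "example group",
                "GroupName": "my-sg",
            },
        }
    }
}


@mock_cloudformation
@mock_ec2
def reproduce():
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="test-stack", TemplateBody=json.dumps(TEMPLATE))

    ec2 = boto3.client("ec2", region_name="us-east-1")
    # Per the report, this lookup fails (InvalidGroup.NotFound) even though the
    # stack deployment should have created the security group.
    return ec2.describe_security_groups(GroupNames=["my-sg"])


if __name__ == "__main__":
    reproduce()
```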
[ { "content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nimport string\nimport random\nimport uuid\nfrom jinja2 import Template\n\nfrom moto.core import BaseBackend, BaseModel\n\n\nROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits\n\n\ndef create_route53_zone_id():\n # New ID's look like this Z1RWWTK7Y8UDDQ\n return ''.join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)])\n\n\nclass HealthCheck(BaseModel):\n\n def __init__(self, health_check_id, health_check_args):\n self.id = health_check_id\n self.ip_address = health_check_args.get(\"ip_address\")\n self.port = health_check_args.get(\"port\", 80)\n self._type = health_check_args.get(\"type\")\n self.resource_path = health_check_args.get(\"resource_path\")\n self.fqdn = health_check_args.get(\"fqdn\")\n self.search_string = health_check_args.get(\"search_string\")\n self.request_interval = health_check_args.get(\"request_interval\", 30)\n self.failure_threshold = health_check_args.get(\"failure_threshold\", 3)\n\n @property\n def physical_resource_id(self):\n return self.id\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']['HealthCheckConfig']\n health_check_args = {\n \"ip_address\": properties.get('IPAddress'),\n \"port\": properties.get('Port'),\n \"type\": properties['Type'],\n \"resource_path\": properties.get('ResourcePath'),\n \"fqdn\": properties.get('FullyQualifiedDomainName'),\n \"search_string\": properties.get('SearchString'),\n \"request_interval\": properties.get('RequestInterval'),\n \"failure_threshold\": properties.get('FailureThreshold'),\n }\n health_check = route53_backend.create_health_check(health_check_args)\n return health_check\n\n def to_xml(self):\n template = Template(\"\"\"<HealthCheck>\n <Id>{{ health_check.id }}</Id>\n <CallerReference>example.com 192.0.2.17</CallerReference>\n <HealthCheckConfig>\n <IPAddress>{{ health_check.ip_address }}</IPAddress>\n <Port>{{ health_check.port }}</Port>\n <Type>{{ health_check._type }}</Type>\n <ResourcePath>{{ health_check.resource_path }}</ResourcePath>\n <FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>\n <RequestInterval>{{ health_check.request_interval }}</RequestInterval>\n <FailureThreshold>{{ health_check.failure_threshold }}</FailureThreshold>\n {% if health_check.search_string %}\n <SearchString>{{ health_check.search_string }}</SearchString>\n {% endif %}\n </HealthCheckConfig>\n <HealthCheckVersion>1</HealthCheckVersion>\n </HealthCheck>\"\"\")\n return template.render(health_check=self)\n\n\nclass RecordSet(BaseModel):\n\n def __init__(self, kwargs):\n self.name = kwargs.get('Name')\n self._type = kwargs.get('Type')\n self.ttl = kwargs.get('TTL')\n self.records = kwargs.get('ResourceRecords', [])\n self.set_identifier = kwargs.get('SetIdentifier')\n self.weight = kwargs.get('Weight')\n self.region = kwargs.get('Region')\n self.health_check = kwargs.get('HealthCheckId')\n self.hosted_zone_name = kwargs.get('HostedZoneName')\n self.hosted_zone_id = kwargs.get('HostedZoneId')\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(\n properties[\"HostedZoneId\"])\n record_set = 
hosted_zone.add_rrset(properties)\n return record_set\n\n @classmethod\n def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):\n cls.delete_from_cloudformation_json(\n original_resource.name, cloudformation_json, region_name)\n return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)\n\n @classmethod\n def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n # this will break if you changed the zone the record is in,\n # unfortunately\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(\n properties[\"HostedZoneId\"])\n\n try:\n hosted_zone.delete_rrset_by_name(resource_name)\n except KeyError:\n pass\n\n @property\n def physical_resource_id(self):\n return self.name\n\n def to_xml(self):\n template = Template(\"\"\"<ResourceRecordSet>\n <Name>{{ record_set.name }}</Name>\n <Type>{{ record_set._type }}</Type>\n {% if record_set.set_identifier %}\n <SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>\n {% endif %}\n {% if record_set.weight %}\n <Weight>{{ record_set.weight }}</Weight>\n {% endif %}\n {% if record_set.region %}\n <Region>{{ record_set.region }}</Region>\n {% endif %}\n <TTL>{{ record_set.ttl }}</TTL>\n <ResourceRecords>\n {% for record in record_set.records %}\n <ResourceRecord>\n <Value>{{ record }}</Value>\n </ResourceRecord>\n {% endfor %}\n </ResourceRecords>\n {% if record_set.health_check %}\n <HealthCheckId>{{ record_set.health_check }}</HealthCheckId>\n {% endif %}\n </ResourceRecordSet>\"\"\")\n return template.render(record_set=self)\n\n def delete(self, *args, **kwargs):\n ''' Not exposed as part of the Route 53 API - used for CloudFormation. 
args are ignored '''\n hosted_zone = route53_backend.get_hosted_zone_by_name(\n self.hosted_zone_name)\n if not hosted_zone:\n hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id)\n hosted_zone.delete_rrset_by_name(self.name)\n\n\nclass FakeZone(BaseModel):\n\n def __init__(self, name, id_, private_zone, comment=None):\n self.name = name\n self.id = id_\n if comment is not None:\n self.comment = comment\n self.private_zone = private_zone\n self.rrsets = []\n\n def add_rrset(self, record_set):\n record_set = RecordSet(record_set)\n self.rrsets.append(record_set)\n return record_set\n\n def upsert_rrset(self, record_set):\n new_rrset = RecordSet(record_set)\n for i, rrset in enumerate(self.rrsets):\n if rrset.name == new_rrset.name:\n self.rrsets[i] = new_rrset\n break\n else:\n self.rrsets.append(new_rrset)\n return new_rrset\n\n def delete_rrset_by_name(self, name):\n self.rrsets = [\n record_set for record_set in self.rrsets if record_set.name != name]\n\n def delete_rrset_by_id(self, set_identifier):\n self.rrsets = [\n record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier]\n\n def get_record_sets(self, type_filter, name_filter):\n record_sets = list(self.rrsets) # Copy the list\n if type_filter:\n record_sets = [\n record_set for record_set in record_sets if record_set._type == type_filter]\n if name_filter:\n record_sets = [\n record_set for record_set in record_sets if record_set.name == name_filter]\n\n return record_sets\n\n @property\n def physical_resource_id(self):\n return self.name\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n name = properties[\"Name\"]\n\n hosted_zone = route53_backend.create_hosted_zone(\n name, private_zone=False)\n return hosted_zone\n\n\nclass RecordSetGroup(BaseModel):\n\n def __init__(self, hosted_zone_id, record_sets):\n self.hosted_zone_id = hosted_zone_id\n self.record_sets = record_sets\n\n @property\n def physical_resource_id(self):\n return \"arn:aws:route53:::hostedzone/{0}\".format(self.hosted_zone_id)\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(properties[\"HostedZoneId\"])\n record_sets = properties[\"RecordSets\"]\n for record_set in record_sets:\n hosted_zone.add_rrset(record_set)\n\n record_set_group = RecordSetGroup(hosted_zone.id, record_sets)\n return record_set_group\n\n\nclass Route53Backend(BaseBackend):\n\n def __init__(self):\n self.zones = {}\n self.health_checks = {}\n self.resource_tags = defaultdict(dict)\n\n def create_hosted_zone(self, name, private_zone, comment=None):\n new_id = create_route53_zone_id()\n new_zone = FakeZone(\n name, new_id, private_zone=private_zone, comment=comment)\n self.zones[new_id] = new_zone\n return new_zone\n\n def change_tags_for_resource(self, resource_id, tags):\n if 'Tag' in tags:\n if isinstance(tags['Tag'], list):\n for tag in tags['Tag']:\n self.resource_tags[resource_id][tag['Key']] = tag['Value']\n else:\n key, value = (tags['Tag']['Key'], tags['Tag']['Value'])\n self.resource_tags[resource_id][key] = value\n else:\n if 'Key' in tags:\n if isinstance(tags['Key'], list):\n for key in tags['Key']:\n 
del(self.resource_tags[resource_id][key])\n else:\n del(self.resource_tags[resource_id][tags['Key']])\n\n def list_tags_for_resource(self, resource_id):\n if resource_id in self.resource_tags:\n return self.resource_tags[resource_id]\n\n def get_all_hosted_zones(self):\n return self.zones.values()\n\n def get_hosted_zone(self, id_):\n return self.zones.get(id_.replace(\"/hostedzone/\", \"\"))\n\n def get_hosted_zone_by_name(self, name):\n for zone in self.get_all_hosted_zones():\n if zone.name == name:\n return zone\n\n def delete_hosted_zone(self, id_):\n return self.zones.pop(id_.replace(\"/hostedzone/\", \"\"), None)\n\n def create_health_check(self, health_check_args):\n health_check_id = str(uuid.uuid4())\n health_check = HealthCheck(health_check_id, health_check_args)\n self.health_checks[health_check_id] = health_check\n return health_check\n\n def get_health_checks(self):\n return self.health_checks.values()\n\n def delete_health_check(self, health_check_id):\n return self.health_checks.pop(health_check_id, None)\n\n\nroute53_backend = Route53Backend()\n", "path": "moto/route53/models.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nimport string\nimport random\nimport uuid\nfrom jinja2 import Template\n\nfrom moto.core import BaseBackend, BaseModel\n\n\nROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits\n\n\ndef create_route53_zone_id():\n # New ID's look like this Z1RWWTK7Y8UDDQ\n return ''.join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)])\n\n\nclass HealthCheck(BaseModel):\n\n def __init__(self, health_check_id, health_check_args):\n self.id = health_check_id\n self.ip_address = health_check_args.get(\"ip_address\")\n self.port = health_check_args.get(\"port\", 80)\n self._type = health_check_args.get(\"type\")\n self.resource_path = health_check_args.get(\"resource_path\")\n self.fqdn = health_check_args.get(\"fqdn\")\n self.search_string = health_check_args.get(\"search_string\")\n self.request_interval = health_check_args.get(\"request_interval\", 30)\n self.failure_threshold = health_check_args.get(\"failure_threshold\", 3)\n\n @property\n def physical_resource_id(self):\n return self.id\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']['HealthCheckConfig']\n health_check_args = {\n \"ip_address\": properties.get('IPAddress'),\n \"port\": properties.get('Port'),\n \"type\": properties['Type'],\n \"resource_path\": properties.get('ResourcePath'),\n \"fqdn\": properties.get('FullyQualifiedDomainName'),\n \"search_string\": properties.get('SearchString'),\n \"request_interval\": properties.get('RequestInterval'),\n \"failure_threshold\": properties.get('FailureThreshold'),\n }\n health_check = route53_backend.create_health_check(health_check_args)\n return health_check\n\n def to_xml(self):\n template = Template(\"\"\"<HealthCheck>\n <Id>{{ health_check.id }}</Id>\n <CallerReference>example.com 192.0.2.17</CallerReference>\n <HealthCheckConfig>\n <IPAddress>{{ health_check.ip_address }}</IPAddress>\n <Port>{{ health_check.port }}</Port>\n <Type>{{ health_check._type }}</Type>\n <ResourcePath>{{ health_check.resource_path }}</ResourcePath>\n <FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>\n <RequestInterval>{{ health_check.request_interval }}</RequestInterval>\n <FailureThreshold>{{ health_check.failure_threshold }}</FailureThreshold>\n {% if health_check.search_string %}\n <SearchString>{{ health_check.search_string }}</SearchString>\n {% endif %}\n </HealthCheckConfig>\n <HealthCheckVersion>1</HealthCheckVersion>\n </HealthCheck>\"\"\")\n return template.render(health_check=self)\n\n\nclass RecordSet(BaseModel):\n\n def __init__(self, kwargs):\n self.name = kwargs.get('Name')\n self._type = kwargs.get('Type')\n self.ttl = kwargs.get('TTL')\n self.records = kwargs.get('ResourceRecords', [])\n self.set_identifier = kwargs.get('SetIdentifier')\n self.weight = kwargs.get('Weight')\n self.region = kwargs.get('Region')\n self.health_check = kwargs.get('HealthCheckId')\n self.hosted_zone_name = kwargs.get('HostedZoneName')\n self.hosted_zone_id = kwargs.get('HostedZoneId')\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(\n properties[\"HostedZoneId\"])\n record_set = 
hosted_zone.add_rrset(properties)\n return record_set\n\n @classmethod\n def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):\n cls.delete_from_cloudformation_json(\n original_resource.name, cloudformation_json, region_name)\n return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)\n\n @classmethod\n def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n # this will break if you changed the zone the record is in,\n # unfortunately\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(\n properties[\"HostedZoneId\"])\n\n try:\n hosted_zone.delete_rrset_by_name(resource_name)\n except KeyError:\n pass\n\n @property\n def physical_resource_id(self):\n return self.name\n\n def to_xml(self):\n template = Template(\"\"\"<ResourceRecordSet>\n <Name>{{ record_set.name }}</Name>\n <Type>{{ record_set._type }}</Type>\n {% if record_set.set_identifier %}\n <SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>\n {% endif %}\n {% if record_set.weight %}\n <Weight>{{ record_set.weight }}</Weight>\n {% endif %}\n {% if record_set.region %}\n <Region>{{ record_set.region }}</Region>\n {% endif %}\n <TTL>{{ record_set.ttl }}</TTL>\n <ResourceRecords>\n {% for record in record_set.records %}\n <ResourceRecord>\n <Value>{{ record }}</Value>\n </ResourceRecord>\n {% endfor %}\n </ResourceRecords>\n {% if record_set.health_check %}\n <HealthCheckId>{{ record_set.health_check }}</HealthCheckId>\n {% endif %}\n </ResourceRecordSet>\"\"\")\n return template.render(record_set=self)\n\n def delete(self, *args, **kwargs):\n ''' Not exposed as part of the Route 53 API - used for CloudFormation. 
args are ignored '''\n hosted_zone = route53_backend.get_hosted_zone_by_name(\n self.hosted_zone_name)\n if not hosted_zone:\n hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id)\n hosted_zone.delete_rrset_by_name(self.name)\n\n\nclass FakeZone(BaseModel):\n\n def __init__(self, name, id_, private_zone, comment=None):\n self.name = name\n self.id = id_\n if comment is not None:\n self.comment = comment\n self.private_zone = private_zone\n self.rrsets = []\n\n def add_rrset(self, record_set):\n record_set = RecordSet(record_set)\n self.rrsets.append(record_set)\n return record_set\n\n def upsert_rrset(self, record_set):\n new_rrset = RecordSet(record_set)\n for i, rrset in enumerate(self.rrsets):\n if rrset.name == new_rrset.name:\n self.rrsets[i] = new_rrset\n break\n else:\n self.rrsets.append(new_rrset)\n return new_rrset\n\n def delete_rrset_by_name(self, name):\n self.rrsets = [\n record_set for record_set in self.rrsets if record_set.name != name]\n\n def delete_rrset_by_id(self, set_identifier):\n self.rrsets = [\n record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier]\n\n def get_record_sets(self, type_filter, name_filter):\n record_sets = list(self.rrsets) # Copy the list\n if type_filter:\n record_sets = [\n record_set for record_set in record_sets if record_set._type == type_filter]\n if name_filter:\n record_sets = [\n record_set for record_set in record_sets if record_set.name == name_filter]\n\n return record_sets\n\n @property\n def physical_resource_id(self):\n return self.id\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n name = properties[\"Name\"]\n\n hosted_zone = route53_backend.create_hosted_zone(\n name, private_zone=False)\n return hosted_zone\n\n\nclass RecordSetGroup(BaseModel):\n\n def __init__(self, hosted_zone_id, record_sets):\n self.hosted_zone_id = hosted_zone_id\n self.record_sets = record_sets\n\n @property\n def physical_resource_id(self):\n return \"arn:aws:route53:::hostedzone/{0}\".format(self.hosted_zone_id)\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n properties = cloudformation_json['Properties']\n\n zone_name = properties.get(\"HostedZoneName\")\n if zone_name:\n hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)\n else:\n hosted_zone = route53_backend.get_hosted_zone(properties[\"HostedZoneId\"])\n record_sets = properties[\"RecordSets\"]\n for record_set in record_sets:\n hosted_zone.add_rrset(record_set)\n\n record_set_group = RecordSetGroup(hosted_zone.id, record_sets)\n return record_set_group\n\n\nclass Route53Backend(BaseBackend):\n\n def __init__(self):\n self.zones = {}\n self.health_checks = {}\n self.resource_tags = defaultdict(dict)\n\n def create_hosted_zone(self, name, private_zone, comment=None):\n new_id = create_route53_zone_id()\n new_zone = FakeZone(\n name, new_id, private_zone=private_zone, comment=comment)\n self.zones[new_id] = new_zone\n return new_zone\n\n def change_tags_for_resource(self, resource_id, tags):\n if 'Tag' in tags:\n if isinstance(tags['Tag'], list):\n for tag in tags['Tag']:\n self.resource_tags[resource_id][tag['Key']] = tag['Value']\n else:\n key, value = (tags['Tag']['Key'], tags['Tag']['Value'])\n self.resource_tags[resource_id][key] = value\n else:\n if 'Key' in tags:\n if isinstance(tags['Key'], list):\n for key in tags['Key']:\n 
del(self.resource_tags[resource_id][key])\n else:\n del(self.resource_tags[resource_id][tags['Key']])\n\n def list_tags_for_resource(self, resource_id):\n if resource_id in self.resource_tags:\n return self.resource_tags[resource_id]\n\n def get_all_hosted_zones(self):\n return self.zones.values()\n\n def get_hosted_zone(self, id_):\n return self.zones.get(id_.replace(\"/hostedzone/\", \"\"))\n\n def get_hosted_zone_by_name(self, name):\n for zone in self.get_all_hosted_zones():\n if zone.name == name:\n return zone\n\n def delete_hosted_zone(self, id_):\n return self.zones.pop(id_.replace(\"/hostedzone/\", \"\"), None)\n\n def create_health_check(self, health_check_args):\n health_check_id = str(uuid.uuid4())\n health_check = HealthCheck(health_check_id, health_check_args)\n self.health_checks[health_check_id] = health_check\n return health_check\n\n def get_health_checks(self):\n return self.health_checks.values()\n\n def delete_health_check(self, health_check_id):\n return self.health_checks.pop(health_check_id, None)\n\n\nroute53_backend = Route53Backend()\n", "path": "moto/route53/models.py" } ]
diff --git a/moto/route53/models.py b/moto/route53/models.py index d12f4ee7a803..f0e52086d508 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -209,7 +209,7 @@ def get_record_sets(self, type_filter, name_filter): @property def physical_resource_id(self): - return self.name + return self.id @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py index 5e66bbd86959..43a11104b3a5 100644 --- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py +++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py @@ -1,6 +1,13 @@ from __future__ import unicode_literals template = { + "Parameters": { + "R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "Ec2Instance": { "Type": "AWS::EC2::Instance", @@ -13,20 +20,20 @@ "HostedZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": { "Fn::Join": ["", [ {"Ref": "Ec2Instance"}, ".", {"Ref": "AWS::Region"}, ".", - {"Ref": "HostedZone"}, "." + {"Ref": "R53ZoneName"}, "." ]] }, "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py index f6a2c9b8e36c..420cd38ba84c 100644 --- a/tests/test_cloudformation/fixtures/route53_health_check.py +++ b/tests/test_cloudformation/fixtures/route53_health_check.py @@ -26,7 +26,7 @@ "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": "my_record_set", "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py index da4fecd4d17b..199e3e0886ad 100644 --- a/tests/test_cloudformation/fixtures/route53_roundrobin.py +++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py @@ -5,30 +5,37 @@ "Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. 
You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "MyZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "MyDNSRecord": { "Type": "AWS::Route53::RecordSetGroup", "Properties": { - "HostedZoneName": {"Ref": "MyZone"}, + "HostedZoneId": {"Ref": "MyZone"}, "Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", "RecordSets": [{ "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["aws.amazon.com"], "Weight": "3" }, { "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["www.amazon.com"],
googleapis__google-api-python-client-1125
Incorrect logging level for `oauth2client.contrib.locked_file` (ref #427)

This is - as per request in the original #427 - a follow-up issue.

The function documentation says:

> Detects an appropriate cache module and returns it. Returns `googleapiclient.discovery_cache.base.Cache`, a cache object which is auto detected, or `None` if no cache object is available.

Exceptions in this context only provide a pythonic way to do control flow, and do not indicate anomalies or malfunctioning of the code. `None` is a perfectly fine, as-per-specifications behaviour.

The correct level should probably be `INFO`.

Also, even if you would stand by the `WARNING` level, you should tidy up the messy log message. This is what the user is met with:

```
WARNING:googleapiclient.discovery_cache:file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
Traceback (most recent call last):
  File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 36, in autodetect
    from google.appengine.api import memcache
ModuleNotFoundError: No module named 'google.appengine'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 33, in <module>
    from oauth2client.contrib.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.contrib.locked_file'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 37, in <module>
    from oauth2client.locked_file import LockedFile
ModuleNotFoundError: No module named 'oauth2client.locked_file'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/__init__.py", line 42, in autodetect
    from . import file_cache
  File "/home/mac/dev/flashfaces/flashfaces/lib/python3.8/site-packages/googleapiclient/discovery_cache/file_cache.py", line 40, in <module>
    raise ImportError(
ImportError: file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth
```

A saner way to communicate with the user could potentially be:

`WARNING - No caching available (file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth)`
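To make the reporter's suggestion concrete, here is a minimal, self-contained sketch of the quieter behaviour being asked for: a single INFO-level line instead of a WARNING carrying chained tracebacks. The module name `some_project.file_cache`, the function name, and the message text are illustrative placeholders, not the library's actual code.

```python
# Hypothetical sketch only: log the absence of an optional cache backend as one
# short, expected-case message rather than a WARNING with a full traceback.
import logging

LOGGER = logging.getLogger(__name__)


def autodetect_cache():
    """Return a cache object if one can be imported, else None."""
    try:
        from some_project import file_cache  # placeholder optional dependency
    except ImportError as exc:
        # Returning None is documented, expected behaviour, so use INFO and
        # omit exc_info: the user sees a single readable line.
        LOGGER.info("No discovery cache available (%s)", exc)
        return None
    return file_cache.cache
```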
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n if 'APPENGINE_RUNTIME' in os.environ:\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n pass\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py" } ]
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Caching utility for the discovery document.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport datetime\nimport os\n\nLOGGER = logging.getLogger(__name__)\n\nDISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day\n\n\ndef autodetect():\n \"\"\"Detects an appropriate cache module and returns it.\n\n Returns:\n googleapiclient.discovery_cache.base.Cache, a cache object which\n is auto detected, or None if no cache object is available.\n \"\"\"\n if 'APPENGINE_RUNTIME' in os.environ:\n try:\n from google.appengine.api import memcache\n from . import appengine_memcache\n\n return appengine_memcache.cache\n except Exception:\n pass\n try:\n from . import file_cache\n\n return file_cache.cache\n except Exception:\n LOGGER.info(\"file_cache is only supported with oauth2client<4.0.0\",\n exc_info=False)\n return None\n", "path": "googleapiclient/discovery_cache/__init__.py" } ]
diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py index 455ff6224f2..197f6bc0a1a 100644 --- a/googleapiclient/discovery_cache/__init__.py +++ b/googleapiclient/discovery_cache/__init__.py @@ -44,6 +44,7 @@ def autodetect(): from . import file_cache return file_cache.cache - except Exception as e: - LOGGER.warning(e, exc_info=True) + except Exception: + LOGGER.info("file_cache is only supported with oauth2client<4.0.0", + exc_info=False) return None
bokeh__bokeh-4792
The docstring of ``add_tools`` is not correct: it currently reads "Adds an tools to the plot."
[ { "content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.warnings import (\n MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL,\n SNAPPED_TOOLBAR_ANNOTATIONS)\nfrom ..core.enums import Location\nfrom ..core.property_mixins import LineProps, FillProps\nfrom ..core.properties import (\n Bool, Int, String, Enum, Auto, Instance, Either,\n List, Dict, Include, Override, TitleProp)\nfrom ..util.string import nice_join\n\nfrom .annotations import Legend, Title\nfrom .axes import Axis\nfrom .glyphs import Glyph\nfrom .grids import Grid\nfrom .ranges import Range, FactorRange\nfrom .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer\nfrom .sources import DataSource, ColumnDataSource\nfrom .tools import Tool, ToolEvents, Toolbar\nfrom .layouts import LayoutDOM\n\nfrom ..util.plot_utils import _list_attr_splat, _select_helper\n\n# See all the way at the bottom of Plot for where this is used.\nDEP_MSG_0_12_0 = \"\"\"\n Plot property '%s' was deprecated in 0.12.0 and will be removed. Use '%s' instead.\n \"\"\"\n\n# We create an empty title by default\nDEFAULT_TITLE = lambda: Title(text=\"\")\n\n\nclass Plot(LayoutDOM):\n \"\"\" Model representing a plot, containing glyphs, guides, annotations.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"tool_events\" not in kwargs:\n kwargs[\"tool_events\"] = ToolEvents()\n\n if \"toolbar\" in kwargs and \"logo\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, logo.\")\n\n if \"toolbar\" in kwargs and \"tools\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, tools.\")\n\n if \"toolbar\" not in kwargs:\n tools = kwargs.pop('tools', [])\n logo = kwargs.pop('logo', 'normal')\n\n kwargs[\"toolbar\"] = Toolbar(tools=tools, logo=logo)\n\n if \"border_fill\" in kwargs and \"border_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: border_fill, border_fill_color.\")\n\n if \"background_fill\" in kwargs and \"background_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: background_fill, background_fill_color.\")\n\n super(LayoutDOM, self).__init__(**kwargs)\n\n def select(self, *args, **kwargs):\n ''' Query this object and all of its references for objects that\n match the given selector.\n\n There are a few different ways to call the ``select`` method.\n The most general is to supply a JSON-like query dictionary as the\n single argument or as keyword arguments:\n\n Args:\n selector (JSON-like) : some sample text\n\n Keyword Arguments:\n kwargs : query dict key/values as keyword arguments\n\n For convenience, queries on just names can be made by supplying\n the ``name`` string as the single parameter:\n\n Args:\n name (str) : the name to query on\n\n Also queries on just type can be made simply by supplying the\n ``Model`` subclass as the single parameter:\n\n Args:\n type (Model) : the type to query on\n\n Returns:\n seq[Model]\n\n Examples:\n\n .. 
code-block:: python\n\n # These two are equivalent\n p.select({\"type\": HoverTool})\n p.select(HoverTool)\n\n # These two are also equivalent\n p.select({\"name\": \"mycircle\"})\n p.select(\"mycircle\")\n\n # Keyword arguments can be supplied in place of selector dict\n p.select({\"name\": \"foo\", \"type\": HoverTool})\n p.select(name=\"foo\", type=HoverTool)\n\n '''\n\n selector = _select_helper(args, kwargs)\n\n # Want to pass selector that is a dictionary\n return _list_attr_splat(find(self.references(), selector, {'plot': self}))\n\n def row(self, row, gridplot):\n ''' Return whether this plot is in a given row of a GridPlot.\n\n Args:\n row (int) : index of the row to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.row(row)\n\n def column(self, col, gridplot):\n ''' Return whether this plot is in a given column of a GridPlot.\n\n Args:\n col (int) : index of the column to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.column(col)\n\n def _axis(self, *sides):\n objs = []\n for s in sides:\n objs.extend(getattr(self, s, []))\n axis = [obj for obj in objs if isinstance(obj, Axis)]\n return _list_attr_splat(axis)\n\n @property\n def xaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.\n\n \"\"\"\n return self._axis(\"above\", \"below\")\n\n @property\n def yaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.\n\n \"\"\"\n return self._axis(\"left\", \"right\")\n\n @property\n def axis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects.\n\n \"\"\"\n return _list_attr_splat(self.xaxis + self.yaxis)\n\n @property\n def legend(self):\n \"\"\"Splattable list of :class:`~bokeh.models.annotations.Legend` objects.\n\n \"\"\"\n legends = [obj for obj in self.renderers if isinstance(obj, Legend)]\n return _list_attr_splat(legends)\n\n def _grid(self, dimension):\n grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]\n return _list_attr_splat(grid)\n\n @property\n def xgrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.\n\n \"\"\"\n return self._grid(0)\n\n @property\n def ygrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.\n\n \"\"\"\n return self._grid(1)\n\n @property\n def grid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects.\n\n \"\"\"\n return _list_attr_splat(self.xgrid + self.ygrid)\n\n @property\n def tools(self):\n return self.toolbar.tools\n\n @tools.setter\n def tools(self, tools):\n self.toolbar.tools = tools\n\n\n def add_layout(self, obj, place='center'):\n ''' Adds an object to the plot in a specified place.\n\n Args:\n obj (Renderer) : the object to add to the Plot\n place (str, optional) : where to add the object (default: 'center')\n Valid places are: 'left', 'right', 'above', 'below', 'center'.\n\n Returns:\n None\n\n '''\n valid_places = ['left', 'right', 'above', 'below', 'center']\n if place not in valid_places:\n raise ValueError(\n \"Invalid place '%s' specified. 
Valid place values are: %s\" % (place, nice_join(valid_places))\n )\n\n if hasattr(obj, 'plot'):\n if obj.plot is not None:\n raise ValueError(\"object to be added already has 'plot' attribute set\")\n obj.plot = self\n\n self.renderers.append(obj)\n\n if place is not 'center':\n getattr(self, place).append(obj)\n\n def add_tools(self, *tools):\n ''' Adds an tools to the plot.\n\n Args:\n *tools (Tool) : the tools to add to the Plot\n\n Returns:\n None\n\n '''\n if not all(isinstance(tool, Tool) for tool in tools):\n raise ValueError(\"All arguments to add_tool must be Tool subclasses.\")\n\n for tool in tools:\n if tool.plot is not None:\n raise ValueError(\"tool %s to be added already has 'plot' attribute set\" % tool)\n tool.plot = self\n if hasattr(tool, 'overlay'):\n self.renderers.append(tool.overlay)\n self.toolbar.tools.append(tool)\n\n def add_glyph(self, source_or_glyph, glyph=None, **kw):\n ''' Adds a glyph to the plot with associated data sources and ranges.\n\n This function will take care of creating and configuring a Glyph object,\n and then add it to the plot's list of renderers.\n\n Args:\n source (DataSource) : a data source for the glyphs to all use\n glyph (Glyph) : the glyph to add to the Plot\n\n\n Keyword Arguments:\n Any additional keyword arguments are passed on as-is to the\n Glyph initializer.\n\n Returns:\n Glyph\n\n '''\n if glyph is not None:\n source = source_or_glyph\n else:\n source, glyph = ColumnDataSource(), source_or_glyph\n\n if not isinstance(source, DataSource):\n raise ValueError(\"'source' argument to add_glyph() must be DataSource subclass\")\n\n if not isinstance(glyph, Glyph):\n raise ValueError(\"'glyph' argument to add_glyph() must be Glyph subclass\")\n\n g = GlyphRenderer(data_source=source, glyph=glyph, **kw)\n self.renderers.append(g)\n return g\n\n def add_tile(self, tile_source, **kw):\n '''Adds new TileRenderer into the Plot.renderers\n\n Args:\n tile_source (TileSource) : a tile source instance which contain tileset configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the tile renderer\n\n Returns:\n TileRenderer : TileRenderer\n\n '''\n tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n self.renderers.append(tile_renderer)\n return tile_renderer\n\n def add_dynamic_image(self, image_source, **kw):\n '''Adds new DynamicImageRenderer into the Plot.renderers\n\n Args:\n image_source (ImageSource) : a image source instance which contain image configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the dynamic image renderer\n\n Returns:\n DynamicImageRenderer : DynamicImageRenderer\n\n '''\n image_renderer = DynamicImageRenderer(image_source=image_source, **kw)\n self.renderers.append(image_renderer)\n return image_renderer\n\n @validation.error(REQUIRED_RANGE)\n def _check_required_range(self):\n missing = []\n if not self.x_range: missing.append('x_range')\n if not self.y_range: missing.append('y_range')\n if missing:\n return \", \".join(missing) + \" [%s]\" % self\n\n @validation.warning(MISSING_RENDERERS)\n def _check_missing_renderers(self):\n if len(self.renderers) == 0:\n return str(self)\n\n @validation.warning(NO_DATA_RENDERERS)\n def _check_no_data_renderers(self):\n if len(self.select(DataRenderer)) == 0:\n return str(self)\n\n @validation.warning(MALFORMED_CATEGORY_LABEL)\n def _check_colon_in_category_label(self):\n if not self.x_range: return\n if not self.y_range: return\n\n broken = []\n\n for range_name in ['x_range', 'y_range']:\n 
category_range = getattr(self, range_name)\n if not isinstance(category_range, FactorRange): continue\n\n for value in category_range.factors:\n if not isinstance(value, string_types): break\n if ':' in value:\n broken.append((range_name, value))\n break\n\n if broken:\n field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)\n for field, value in broken)\n return '%s [renderer: %s]' % (field_msg, self)\n\n @validation.warning(SNAPPED_TOOLBAR_ANNOTATIONS)\n def _check_snapped_toolbar_and_axis(self):\n if not self.toolbar_sticky: return\n if self.toolbar_location is None: return\n\n objs = getattr(self, self.toolbar_location)\n if len(objs) > 0:\n return str(self)\n\n __deprecated_attributes__ = (\n 'background_fill', 'border_fill', 'logo', 'tools', 'responsive',\n 'title_text_baseline', 'title_text_align', 'title_text_alpha', 'title_text_color',\n 'title_text_font_style', 'title_text_font_size', 'title_text_font', 'title_standoff'\n )\n\n x_range = Instance(Range, help=\"\"\"\n The (default) data range of the horizontal dimension of the plot.\n \"\"\")\n\n y_range = Instance(Range, help=\"\"\"\n The (default) data range of the vertical dimension of the plot.\n \"\"\")\n\n x_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert x-coordinates in data space\n into x-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates.\n \"\"\")\n\n y_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert y-coordinates in data space\n into y-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates\n \"\"\")\n\n extra_x_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping x-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n extra_y_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping y-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n hidpi = Bool(default=True, help=\"\"\"\n Whether to use HiDPI mode when available.\n \"\"\")\n\n title = TitleProp(default=DEFAULT_TITLE, help=\"\"\"\n A title for the plot. Can be a text string or a Title annotation. Default is Title(text=\"\").\n \"\"\")\n\n title_location = Enum(Location, default=\"above\", help=\"\"\"\n Where the title will be located. Titles on the left or right side\n will be rotated.\n \"\"\")\n\n outline_props = Include(LineProps, help=\"\"\"\n The %s for the plot border outline.\n \"\"\")\n\n outline_line_color = Override(default=\"#e5e5e5\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n A list of all renderers for this plot, including guides and annotations\n in addition to glyphs and markers.\n\n This property can be manipulated by hand, but the ``add_glyph`` and\n ``add_layout`` methods are recommended to help make sure all necessary\n setup is performed.\n \"\"\")\n\n toolbar = Instance(Toolbar, help=\"\"\"\n The toolbar associated with this plot which holds all the tools.\n\n The toolbar is automatically created with the plot.\n \"\"\")\n\n toolbar_location = Enum(Location, default=\"right\", help=\"\"\"\n Where the toolbar will be located. 
If set to None, no toolbar\n will be attached to the plot.\n \"\"\")\n\n toolbar_sticky = Bool(default=True, help=\"\"\"\n Stick the toolbar to the edge of the plot. Default: True. If False,\n the toolbar will be outside of the axes, titles etc.\n \"\"\")\n\n tool_events = Instance(ToolEvents, help=\"\"\"\n A ToolEvents object to share and report tool events.\n \"\"\")\n\n left = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the left of the plot.\n \"\"\")\n\n right = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the right of the plot.\n \"\"\")\n\n above = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area above of the plot.\n \"\"\")\n\n below = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area below of the plot.\n \"\"\")\n\n plot_height = Int(600, help=\"\"\"\n Total height of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the height of the HTML\n canvas that will be used.\n\n \"\"\")\n\n plot_width = Int(600, help=\"\"\"\n Total width of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the width of the HTML\n canvas that will be used.\n\n \"\"\")\n\n background_props = Include(FillProps, help=\"\"\"\n The %s for the plot background style.\n \"\"\")\n\n background_fill_color = Override(default='#ffffff')\n\n border_props = Include(FillProps, help=\"\"\"\n The %s for the plot border style.\n \"\"\")\n\n border_fill_color = Override(default='#ffffff')\n\n min_border_top = Int(help=\"\"\"\n Minimum size in pixels of the padding region above the top of the\n central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_bottom = Int(help=\"\"\"\n Minimum size in pixels of the padding region below the bottom of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_left = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the left of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_right = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the right of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border = Int(5, help=\"\"\"\n A convenience property to set all all the ``min_border_X`` properties\n to the same value. If an individual border property is explicitly set,\n it will override ``min_border``.\n \"\"\")\n\n h_symmetry = Bool(True, help=\"\"\"\n Whether the total horizontal padding on both sides of the plot will\n be made equal (the left or right padding amount, whichever is larger).\n \"\"\")\n\n v_symmetry = Bool(False, help=\"\"\"\n Whether the total vertical padding on both sides of the plot will\n be made equal (the top or bottom padding amount, whichever is larger).\n \"\"\")\n\n lod_factor = Int(10, help=\"\"\"\n Decimation factor to use when applying level-of-detail decimation.\n \"\"\")\n\n lod_threshold = Int(2000, help=\"\"\"\n A number of data points, above which level-of-detail downsampling may\n be performed by glyph renderers. 
Set to ``None`` to disable any\n level-of-detail downsampling.\n \"\"\")\n\n lod_interval = Int(300, help=\"\"\"\n Interval (in ms) during which an interactive tool event will enable\n level-of-detail downsampling.\n \"\"\")\n\n lod_timeout = Int(500, help=\"\"\"\n Timeout (in ms) for checking whether interactive tool events are still\n occurring. Once level-of-detail mode is enabled, a check is made every\n ``lod_timeout`` ms. If no interactive tool events have happened,\n level-of-detail mode is disabled.\n \"\"\")\n\n webgl = Bool(False, help=\"\"\"\n Whether WebGL is enabled for this plot. If True, the glyphs that\n support this will render via WebGL instead of the 2D canvas.\n \"\"\")\n\n #\n # DEPRECATED PROPERTIES\n #\n\n @property\n def responsive(self):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n return self.sizing_mode != \"fixed\"\n\n @responsive.setter\n def responsive(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n warnings.warn(\"\"\"\n The 'responsive' property has been deprecated in 0.12.0. It has been\n replaced by 'sizing_mode' which accepts one of five modes:\n\n fixed, scale_width, scale_height, scale_both, stretch_both\n\n 'responsive = False' is the equivalent of 'sizing_mode = \"fixed\"'\n\n 'responsive = True' is the equivalent of 'sizing_mode = \"scale_width\"'\n \"\"\")\n if value is True:\n self.sizing_mode = \"scale_width\"\n elif value is False:\n self.sizing_mode = \"fixed\"\n else:\n raise ValueError(\"Plot.responsive only accepts True or False, got: %r\" % value)\n\n @property\n def background_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n return self.background_fill_color\n\n @background_fill.setter\n def background_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n self.background_fill_color = color\n\n @property\n def border_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. Use 'border_fill_color' instead.\n \"\"\")\n return self.border_fill_color\n\n @border_fill.setter\n def border_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. 
Use 'border_fill_color' instead.\n \"\"\")\n self.border_fill_color = color\n\n @property\n def logo(self):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n return self.toolbar.logo\n\n @logo.setter\n def logo(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n self.toolbar.logo = value\n\n @property\n def title_standoff(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n return self.title.offset\n\n @title_standoff.setter\n def title_standoff(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n self.title.offset = value\n\n @property\n def title_text_font(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n return self.title.text_font\n\n @title_text_font.setter\n def title_text_font(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n self.title.text_font = value\n\n @property\n def title_text_font_size(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n return self.title.text_font_size\n\n @title_text_font_size.setter\n def title_text_font_size(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n self.title.text_font_size = value\n\n @property\n def title_text_font_style(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n return self.title.text_font_style\n\n @title_text_font_style.setter\n def title_text_font_style(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n self.title.text_font_style = value\n\n @property\n def title_text_color(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n return self.title.text_color\n\n @title_text_color.setter\n def title_text_color(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n self.title.text_color = value\n\n @property\n def title_text_alpha(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n return self.title.text_alpha\n\n @title_text_alpha.setter\n def title_text_alpha(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n self.title.text_alpha = value\n\n @property\n def title_text_align(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return self.title.align\n\n @title_text_align.setter\n def title_text_align(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. 
The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n self.title.align = value\n\n @property\n def title_text_baseline(self):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return None\n\n @title_text_baseline.setter\n def title_text_baseline(self, value):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n", "path": "bokeh/models/plots.py" } ]
[ { "content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.warnings import (\n MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL,\n SNAPPED_TOOLBAR_ANNOTATIONS)\nfrom ..core.enums import Location\nfrom ..core.property_mixins import LineProps, FillProps\nfrom ..core.properties import (\n Bool, Int, String, Enum, Auto, Instance, Either,\n List, Dict, Include, Override, TitleProp)\nfrom ..util.string import nice_join\n\nfrom .annotations import Legend, Title\nfrom .axes import Axis\nfrom .glyphs import Glyph\nfrom .grids import Grid\nfrom .ranges import Range, FactorRange\nfrom .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer\nfrom .sources import DataSource, ColumnDataSource\nfrom .tools import Tool, ToolEvents, Toolbar\nfrom .layouts import LayoutDOM\n\nfrom ..util.plot_utils import _list_attr_splat, _select_helper\n\n# See all the way at the bottom of Plot for where this is used.\nDEP_MSG_0_12_0 = \"\"\"\n Plot property '%s' was deprecated in 0.12.0 and will be removed. Use '%s' instead.\n \"\"\"\n\n# We create an empty title by default\nDEFAULT_TITLE = lambda: Title(text=\"\")\n\n\nclass Plot(LayoutDOM):\n \"\"\" Model representing a plot, containing glyphs, guides, annotations.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"tool_events\" not in kwargs:\n kwargs[\"tool_events\"] = ToolEvents()\n\n if \"toolbar\" in kwargs and \"logo\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, logo.\")\n\n if \"toolbar\" in kwargs and \"tools\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, tools.\")\n\n if \"toolbar\" not in kwargs:\n tools = kwargs.pop('tools', [])\n logo = kwargs.pop('logo', 'normal')\n\n kwargs[\"toolbar\"] = Toolbar(tools=tools, logo=logo)\n\n if \"border_fill\" in kwargs and \"border_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: border_fill, border_fill_color.\")\n\n if \"background_fill\" in kwargs and \"background_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: background_fill, background_fill_color.\")\n\n super(LayoutDOM, self).__init__(**kwargs)\n\n def select(self, *args, **kwargs):\n ''' Query this object and all of its references for objects that\n match the given selector.\n\n There are a few different ways to call the ``select`` method.\n The most general is to supply a JSON-like query dictionary as the\n single argument or as keyword arguments:\n\n Args:\n selector (JSON-like) : some sample text\n\n Keyword Arguments:\n kwargs : query dict key/values as keyword arguments\n\n For convenience, queries on just names can be made by supplying\n the ``name`` string as the single parameter:\n\n Args:\n name (str) : the name to query on\n\n Also queries on just type can be made simply by supplying the\n ``Model`` subclass as the single parameter:\n\n Args:\n type (Model) : the type to query on\n\n Returns:\n seq[Model]\n\n Examples:\n\n .. 
code-block:: python\n\n # These two are equivalent\n p.select({\"type\": HoverTool})\n p.select(HoverTool)\n\n # These two are also equivalent\n p.select({\"name\": \"mycircle\"})\n p.select(\"mycircle\")\n\n # Keyword arguments can be supplied in place of selector dict\n p.select({\"name\": \"foo\", \"type\": HoverTool})\n p.select(name=\"foo\", type=HoverTool)\n\n '''\n\n selector = _select_helper(args, kwargs)\n\n # Want to pass selector that is a dictionary\n return _list_attr_splat(find(self.references(), selector, {'plot': self}))\n\n def row(self, row, gridplot):\n ''' Return whether this plot is in a given row of a GridPlot.\n\n Args:\n row (int) : index of the row to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.row(row)\n\n def column(self, col, gridplot):\n ''' Return whether this plot is in a given column of a GridPlot.\n\n Args:\n col (int) : index of the column to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.column(col)\n\n def _axis(self, *sides):\n objs = []\n for s in sides:\n objs.extend(getattr(self, s, []))\n axis = [obj for obj in objs if isinstance(obj, Axis)]\n return _list_attr_splat(axis)\n\n @property\n def xaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.\n\n \"\"\"\n return self._axis(\"above\", \"below\")\n\n @property\n def yaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.\n\n \"\"\"\n return self._axis(\"left\", \"right\")\n\n @property\n def axis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects.\n\n \"\"\"\n return _list_attr_splat(self.xaxis + self.yaxis)\n\n @property\n def legend(self):\n \"\"\"Splattable list of :class:`~bokeh.models.annotations.Legend` objects.\n\n \"\"\"\n legends = [obj for obj in self.renderers if isinstance(obj, Legend)]\n return _list_attr_splat(legends)\n\n def _grid(self, dimension):\n grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]\n return _list_attr_splat(grid)\n\n @property\n def xgrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.\n\n \"\"\"\n return self._grid(0)\n\n @property\n def ygrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.\n\n \"\"\"\n return self._grid(1)\n\n @property\n def grid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects.\n\n \"\"\"\n return _list_attr_splat(self.xgrid + self.ygrid)\n\n @property\n def tools(self):\n return self.toolbar.tools\n\n @tools.setter\n def tools(self, tools):\n self.toolbar.tools = tools\n\n\n def add_layout(self, obj, place='center'):\n ''' Adds an object to the plot in a specified place.\n\n Args:\n obj (Renderer) : the object to add to the Plot\n place (str, optional) : where to add the object (default: 'center')\n Valid places are: 'left', 'right', 'above', 'below', 'center'.\n\n Returns:\n None\n\n '''\n valid_places = ['left', 'right', 'above', 'below', 'center']\n if place not in valid_places:\n raise ValueError(\n \"Invalid place '%s' specified. 
Valid place values are: %s\" % (place, nice_join(valid_places))\n )\n\n if hasattr(obj, 'plot'):\n if obj.plot is not None:\n raise ValueError(\"object to be added already has 'plot' attribute set\")\n obj.plot = self\n\n self.renderers.append(obj)\n\n if place is not 'center':\n getattr(self, place).append(obj)\n\n def add_tools(self, *tools):\n ''' Adds tools to the plot.\n\n Args:\n *tools (Tool) : the tools to add to the Plot\n\n Returns:\n None\n\n '''\n if not all(isinstance(tool, Tool) for tool in tools):\n raise ValueError(\"All arguments to add_tool must be Tool subclasses.\")\n\n for tool in tools:\n if tool.plot is not None:\n raise ValueError(\"tool %s to be added already has 'plot' attribute set\" % tool)\n tool.plot = self\n if hasattr(tool, 'overlay'):\n self.renderers.append(tool.overlay)\n self.toolbar.tools.append(tool)\n\n def add_glyph(self, source_or_glyph, glyph=None, **kw):\n ''' Adds a glyph to the plot with associated data sources and ranges.\n\n This function will take care of creating and configuring a Glyph object,\n and then add it to the plot's list of renderers.\n\n Args:\n source (DataSource) : a data source for the glyphs to all use\n glyph (Glyph) : the glyph to add to the Plot\n\n\n Keyword Arguments:\n Any additional keyword arguments are passed on as-is to the\n Glyph initializer.\n\n Returns:\n Glyph\n\n '''\n if glyph is not None:\n source = source_or_glyph\n else:\n source, glyph = ColumnDataSource(), source_or_glyph\n\n if not isinstance(source, DataSource):\n raise ValueError(\"'source' argument to add_glyph() must be DataSource subclass\")\n\n if not isinstance(glyph, Glyph):\n raise ValueError(\"'glyph' argument to add_glyph() must be Glyph subclass\")\n\n g = GlyphRenderer(data_source=source, glyph=glyph, **kw)\n self.renderers.append(g)\n return g\n\n def add_tile(self, tile_source, **kw):\n '''Adds new TileRenderer into the Plot.renderers\n\n Args:\n tile_source (TileSource) : a tile source instance which contain tileset configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the tile renderer\n\n Returns:\n TileRenderer : TileRenderer\n\n '''\n tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n self.renderers.append(tile_renderer)\n return tile_renderer\n\n def add_dynamic_image(self, image_source, **kw):\n '''Adds new DynamicImageRenderer into the Plot.renderers\n\n Args:\n image_source (ImageSource) : a image source instance which contain image configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the dynamic image renderer\n\n Returns:\n DynamicImageRenderer : DynamicImageRenderer\n\n '''\n image_renderer = DynamicImageRenderer(image_source=image_source, **kw)\n self.renderers.append(image_renderer)\n return image_renderer\n\n @validation.error(REQUIRED_RANGE)\n def _check_required_range(self):\n missing = []\n if not self.x_range: missing.append('x_range')\n if not self.y_range: missing.append('y_range')\n if missing:\n return \", \".join(missing) + \" [%s]\" % self\n\n @validation.warning(MISSING_RENDERERS)\n def _check_missing_renderers(self):\n if len(self.renderers) == 0:\n return str(self)\n\n @validation.warning(NO_DATA_RENDERERS)\n def _check_no_data_renderers(self):\n if len(self.select(DataRenderer)) == 0:\n return str(self)\n\n @validation.warning(MALFORMED_CATEGORY_LABEL)\n def _check_colon_in_category_label(self):\n if not self.x_range: return\n if not self.y_range: return\n\n broken = []\n\n for range_name in ['x_range', 'y_range']:\n 
category_range = getattr(self, range_name)\n if not isinstance(category_range, FactorRange): continue\n\n for value in category_range.factors:\n if not isinstance(value, string_types): break\n if ':' in value:\n broken.append((range_name, value))\n break\n\n if broken:\n field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)\n for field, value in broken)\n return '%s [renderer: %s]' % (field_msg, self)\n\n @validation.warning(SNAPPED_TOOLBAR_ANNOTATIONS)\n def _check_snapped_toolbar_and_axis(self):\n if not self.toolbar_sticky: return\n if self.toolbar_location is None: return\n\n objs = getattr(self, self.toolbar_location)\n if len(objs) > 0:\n return str(self)\n\n __deprecated_attributes__ = (\n 'background_fill', 'border_fill', 'logo', 'tools', 'responsive',\n 'title_text_baseline', 'title_text_align', 'title_text_alpha', 'title_text_color',\n 'title_text_font_style', 'title_text_font_size', 'title_text_font', 'title_standoff'\n )\n\n x_range = Instance(Range, help=\"\"\"\n The (default) data range of the horizontal dimension of the plot.\n \"\"\")\n\n y_range = Instance(Range, help=\"\"\"\n The (default) data range of the vertical dimension of the plot.\n \"\"\")\n\n x_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert x-coordinates in data space\n into x-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates.\n \"\"\")\n\n y_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert y-coordinates in data space\n into y-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates\n \"\"\")\n\n extra_x_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping x-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n extra_y_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping y-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n hidpi = Bool(default=True, help=\"\"\"\n Whether to use HiDPI mode when available.\n \"\"\")\n\n title = TitleProp(default=DEFAULT_TITLE, help=\"\"\"\n A title for the plot. Can be a text string or a Title annotation. Default is Title(text=\"\").\n \"\"\")\n\n title_location = Enum(Location, default=\"above\", help=\"\"\"\n Where the title will be located. Titles on the left or right side\n will be rotated.\n \"\"\")\n\n outline_props = Include(LineProps, help=\"\"\"\n The %s for the plot border outline.\n \"\"\")\n\n outline_line_color = Override(default=\"#e5e5e5\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n A list of all renderers for this plot, including guides and annotations\n in addition to glyphs and markers.\n\n This property can be manipulated by hand, but the ``add_glyph`` and\n ``add_layout`` methods are recommended to help make sure all necessary\n setup is performed.\n \"\"\")\n\n toolbar = Instance(Toolbar, help=\"\"\"\n The toolbar associated with this plot which holds all the tools.\n\n The toolbar is automatically created with the plot.\n \"\"\")\n\n toolbar_location = Enum(Location, default=\"right\", help=\"\"\"\n Where the toolbar will be located. 
If set to None, no toolbar\n will be attached to the plot.\n \"\"\")\n\n toolbar_sticky = Bool(default=True, help=\"\"\"\n Stick the toolbar to the edge of the plot. Default: True. If False,\n the toolbar will be outside of the axes, titles etc.\n \"\"\")\n\n tool_events = Instance(ToolEvents, help=\"\"\"\n A ToolEvents object to share and report tool events.\n \"\"\")\n\n left = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the left of the plot.\n \"\"\")\n\n right = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the right of the plot.\n \"\"\")\n\n above = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area above of the plot.\n \"\"\")\n\n below = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area below of the plot.\n \"\"\")\n\n plot_height = Int(600, help=\"\"\"\n Total height of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the height of the HTML\n canvas that will be used.\n\n \"\"\")\n\n plot_width = Int(600, help=\"\"\"\n Total width of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the width of the HTML\n canvas that will be used.\n\n \"\"\")\n\n background_props = Include(FillProps, help=\"\"\"\n The %s for the plot background style.\n \"\"\")\n\n background_fill_color = Override(default='#ffffff')\n\n border_props = Include(FillProps, help=\"\"\"\n The %s for the plot border style.\n \"\"\")\n\n border_fill_color = Override(default='#ffffff')\n\n min_border_top = Int(help=\"\"\"\n Minimum size in pixels of the padding region above the top of the\n central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_bottom = Int(help=\"\"\"\n Minimum size in pixels of the padding region below the bottom of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_left = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the left of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_right = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the right of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border = Int(5, help=\"\"\"\n A convenience property to set all all the ``min_border_X`` properties\n to the same value. If an individual border property is explicitly set,\n it will override ``min_border``.\n \"\"\")\n\n h_symmetry = Bool(True, help=\"\"\"\n Whether the total horizontal padding on both sides of the plot will\n be made equal (the left or right padding amount, whichever is larger).\n \"\"\")\n\n v_symmetry = Bool(False, help=\"\"\"\n Whether the total vertical padding on both sides of the plot will\n be made equal (the top or bottom padding amount, whichever is larger).\n \"\"\")\n\n lod_factor = Int(10, help=\"\"\"\n Decimation factor to use when applying level-of-detail decimation.\n \"\"\")\n\n lod_threshold = Int(2000, help=\"\"\"\n A number of data points, above which level-of-detail downsampling may\n be performed by glyph renderers. 
Set to ``None`` to disable any\n level-of-detail downsampling.\n \"\"\")\n\n lod_interval = Int(300, help=\"\"\"\n Interval (in ms) during which an interactive tool event will enable\n level-of-detail downsampling.\n \"\"\")\n\n lod_timeout = Int(500, help=\"\"\"\n Timeout (in ms) for checking whether interactive tool events are still\n occurring. Once level-of-detail mode is enabled, a check is made every\n ``lod_timeout`` ms. If no interactive tool events have happened,\n level-of-detail mode is disabled.\n \"\"\")\n\n webgl = Bool(False, help=\"\"\"\n Whether WebGL is enabled for this plot. If True, the glyphs that\n support this will render via WebGL instead of the 2D canvas.\n \"\"\")\n\n #\n # DEPRECATED PROPERTIES\n #\n\n @property\n def responsive(self):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n return self.sizing_mode != \"fixed\"\n\n @responsive.setter\n def responsive(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n warnings.warn(\"\"\"\n The 'responsive' property has been deprecated in 0.12.0. It has been\n replaced by 'sizing_mode' which accepts one of five modes:\n\n fixed, scale_width, scale_height, scale_both, stretch_both\n\n 'responsive = False' is the equivalent of 'sizing_mode = \"fixed\"'\n\n 'responsive = True' is the equivalent of 'sizing_mode = \"scale_width\"'\n \"\"\")\n if value is True:\n self.sizing_mode = \"scale_width\"\n elif value is False:\n self.sizing_mode = \"fixed\"\n else:\n raise ValueError(\"Plot.responsive only accepts True or False, got: %r\" % value)\n\n @property\n def background_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n return self.background_fill_color\n\n @background_fill.setter\n def background_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n self.background_fill_color = color\n\n @property\n def border_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. Use 'border_fill_color' instead.\n \"\"\")\n return self.border_fill_color\n\n @border_fill.setter\n def border_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. 
Use 'border_fill_color' instead.\n \"\"\")\n self.border_fill_color = color\n\n @property\n def logo(self):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n return self.toolbar.logo\n\n @logo.setter\n def logo(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n self.toolbar.logo = value\n\n @property\n def title_standoff(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n return self.title.offset\n\n @title_standoff.setter\n def title_standoff(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n self.title.offset = value\n\n @property\n def title_text_font(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n return self.title.text_font\n\n @title_text_font.setter\n def title_text_font(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n self.title.text_font = value\n\n @property\n def title_text_font_size(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n return self.title.text_font_size\n\n @title_text_font_size.setter\n def title_text_font_size(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n self.title.text_font_size = value\n\n @property\n def title_text_font_style(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n return self.title.text_font_style\n\n @title_text_font_style.setter\n def title_text_font_style(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n self.title.text_font_style = value\n\n @property\n def title_text_color(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n return self.title.text_color\n\n @title_text_color.setter\n def title_text_color(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n self.title.text_color = value\n\n @property\n def title_text_alpha(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n return self.title.text_alpha\n\n @title_text_alpha.setter\n def title_text_alpha(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n self.title.text_alpha = value\n\n @property\n def title_text_align(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return self.title.align\n\n @title_text_align.setter\n def title_text_align(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. 
The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n self.title.align = value\n\n @property\n def title_text_baseline(self):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return None\n\n @title_text_baseline.setter\n def title_text_baseline(self, value):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n", "path": "bokeh/models/plots.py" } ]
diff --git a/bokeh/models/plots.py b/bokeh/models/plots.py
index 838d29ac46b..86a04895ba0 100644
--- a/bokeh/models/plots.py
+++ b/bokeh/models/plots.py
@@ -246,7 +246,7 @@ def add_layout(self, obj, place='center'):
         getattr(self, place).append(obj)
 
     def add_tools(self, *tools):
-        ''' Adds an tools to the plot.
+        ''' Adds tools to the plot.
 
         Args:
             *tools (Tool) : the tools to add to the Plot
huggingface__accelerate-811
Error when pickling accelerated optimizers with PyTorch 1.13

### System Info

```Shell
$ pip list
Package                  Version
------------------------ ----------
accelerate               0.13.2
numpy                    1.23.4
nvidia-cublas-cu11       11.10.3.66
nvidia-cuda-nvrtc-cu11   11.7.99
nvidia-cuda-runtime-cu11 11.7.99
nvidia-cudnn-cu11        8.5.0.96
packaging                21.3
pip                      22.3
psutil                   5.9.3
pyparsing                3.0.9
PyYAML                   6.0
setuptools               65.5.0
torch                    1.13.0
typing_extensions        4.4.0
wheel                    0.37.1
```

Same issue with torch-cpu. OS: Ubuntu 20.04

### Information

- [ ] The official example scripts
- [X] My own modified scripts

### Tasks

- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [X] My own task or dataset (give details below)

### Reproduction

```python
import pickle

import accelerate
import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
pickle.loads(pickle.dumps(optimizer))  # works

accelerator = accelerate.Accelerator(mixed_precision='fp16')
optimizer2 = accelerator.prepare(optimizer)
pickle.loads(pickle.dumps(optimizer2))  # fails
```

### Expected behavior

I ran into a problem with accelerate and PyTorch 1.13. It appears that optimizers cannot be pickled anymore after being accelerated. When running the attached script, I get:

```
Traceback (most recent call last):
  File ".../skorch/foo.py", line 12, in <module>
    pickle.loads(pickle.dumps(optimizer2))
  File ".../torch/optim/optimizer.py", line 84, in __setstate__
    self.defaults.setdefault('differentiable', False)
  File ".../accelerate/optimizer.py", line 90, in defaults
    return self.optimizer.defaults
AttributeError: 'AcceleratedOptimizer' object has no attribute 'optimizer'
```

The offending addition in PyTorch seems to be this line: https://github.com/pytorch/pytorch/blob/23fe6c8ca15ec2cf6ea74f93aa91cae343ea534f/torch/optim/optimizer.py#L84 which was not present in PyTorch 1.12. At object creation time, PyTorch now tries to access the `defaults` attribute, which in turn calls the `defaults` property in accelerate, which requires the `optimizer` attribute, which doesn't exist and thus errors.

At first glance, it looks like `AcceleratedOptimizer` might need its own `__getstate__` and `__setstate__` to solve this, but I'm not sure.
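To make the failure mode above concrete, here is a minimal, dependency-free sketch of the same pattern. The class names (`Base`, `Wrapper`, `Inner`) are illustrative stand-ins, not the real PyTorch or Accelerate classes; the point is only that a base class whose pickle hooks keep a reduced state and then touch a proxied property will fail the same way as the traceback above, and that overriding `__getstate__`/`__setstate__` on the wrapper avoids it.

```python
import pickle


class Base:
    """Stand-in for a base class whose pickle hooks keep only a subset of the
    state and then touch ``defaults`` while restoring (as in the traceback)."""

    def __getstate__(self):
        return {"defaults": self.defaults}

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.defaults.setdefault("differentiable", False)


class Wrapper(Base):
    """Stand-in for a wrapper whose ``defaults`` proxies to a wrapped object."""

    def __init__(self, inner):
        self.inner = inner

    @property
    def defaults(self):
        # Raises AttributeError if ``inner`` never made it into the pickle.
        return self.inner.defaults

    @defaults.setter
    def defaults(self, value):
        self.inner.defaults = value

    # Overriding both hooks keeps the full __dict__ (including ``inner``) in
    # the payload and restores it without going through any proxy property.
    def __getstate__(self):
        return self.__dict__.copy()

    def __setstate__(self, state):
        self.__dict__.update(state)


class Inner:
    def __init__(self):
        self.defaults = {"lr": 0.1}


restored = pickle.loads(pickle.dumps(Wrapper(Inner())))
print(restored.defaults)  # {'lr': 0.1}
```

Run as a plain script, the last line prints `{'lr': 0.1}`; deleting the two overridden hooks on `Wrapper` reproduces an `AttributeError` of the same shape as the one reported above.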
[ { "content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport warnings\n\nimport torch\n\nfrom .state import AcceleratorState, GradientState\nfrom .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient\n accumulation.\n\n Args:\n optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (`bool`, *optional*, defaults to `True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n `optimizer` on the right device.\n scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.accelerator_state = AcceleratorState()\n self.gradient_state = GradientState()\n self.device_placement = device_placement\n self._is_overflow = False\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n else:\n state_dict = move_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def state(self):\n return self.optimizer.state\n\n @state.setter\n def state(self, state):\n self.optimizer.state = state\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if self.gradient_state.sync_gradients:\n if 
is_torch_version(\"<\", \"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n if accept_arg:\n if set_to_none is None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n else:\n if set_to_none is not None:\n raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n self.optimizer.zero_grad()\n\n def step(self, closure=None):\n if self.gradient_state.sync_gradients:\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n self._is_overflow = scale_after < scale_before\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n warnings.warn(\n \"The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use \"\n \"`optimizer.step_was_skipped` instead.\",\n FutureWarning,\n )\n return self._is_overflow\n\n @property\n def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\n", "path": "src/accelerate/optimizer.py" } ]
[ { "content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport warnings\n\nimport torch\n\nfrom .state import AcceleratorState, GradientState\nfrom .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient\n accumulation.\n\n Args:\n optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (`bool`, *optional*, defaults to `True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n `optimizer` on the right device.\n scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.accelerator_state = AcceleratorState()\n self.gradient_state = GradientState()\n self.device_placement = device_placement\n self._is_overflow = False\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n else:\n state_dict = move_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def state(self):\n return self.optimizer.state\n\n @state.setter\n def state(self, state):\n self.optimizer.state = state\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if self.gradient_state.sync_gradients:\n if 
is_torch_version(\"<\", \"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n if accept_arg:\n if set_to_none is None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n else:\n if set_to_none is not None:\n raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n self.optimizer.zero_grad()\n\n def step(self, closure=None):\n if self.gradient_state.sync_gradients:\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n self._is_overflow = scale_after < scale_before\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n warnings.warn(\n \"The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use \"\n \"`optimizer.step_was_skipped` instead.\",\n FutureWarning,\n )\n return self._is_overflow\n\n @property\n def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\n\n def __getstate__(self):\n return self.__dict__.copy()\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n", "path": "src/accelerate/optimizer.py" } ]
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
index 4fad12c724e..d5eeef99a02 100644
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -157,3 +157,9 @@ def is_overflow(self):
     def step_was_skipped(self):
         """Whether or not the optimizer step was skipped."""
         return self._is_overflow
+
+    def __getstate__(self):
+        return self.__dict__.copy()
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
new file mode 100644
index 00000000000..15a095bf798
--- /dev/null
+++ b/tests/test_optimizer.py
@@ -0,0 +1,36 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pickle
+import unittest
+
+import torch
+
+from accelerate import Accelerator
+from accelerate.state import AcceleratorState
+from accelerate.test_utils import require_cpu
+
+
+@require_cpu
+class OptimizerTester(unittest.TestCase):
+    def test_accelerated_optimizer_pickling(self):
+        model = torch.nn.Linear(10, 10)
+        optimizer = torch.optim.SGD(model.parameters(), 0.1)
+        accelerator = Accelerator()
+        optimizer = accelerator.prepare(optimizer)
+        try:
+            pickle.loads(pickle.dumps(optimizer))
+        except Exception as e:
+            self.fail(f"Accelerated optimizer pickling failed with {e}")
+        AcceleratorState._reset_state()
conan-io__conan-center-index-18494
[package] clickhouse-cpp/*: fPIC option is not respected

In the recipe file, the `fPIC` option is always removed during the configure stage, which can lead to a non-working static library.
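For context, the conventional Conan 2.x handling of this option is sketched below on a hypothetical minimal recipe (not the actual clickhouse-cpp one): `fPIC` is removed only on Windows, where it does not apply, and for shared builds, so a user-supplied `fPIC=False` for a static build is preserved.

```python
from conan import ConanFile


class ExampleRecipe(ConanFile):
    # Hypothetical minimal recipe used only to illustrate the usual pattern.
    name = "example"
    version = "0.1"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}

    def config_options(self):
        # fPIC has no meaning on Windows, so the option is removed there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Drop fPIC only for shared builds; for static builds the value the
        # user passes (e.g. -o example/*:fPIC=False) must be respected.
        if self.options.shared:
            self.options.rm_safe("fPIC")
```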
[ { "content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass ClickHouseCppConan(ConanFile):\n name = \"clickhouse-cpp\"\n homepage = \"https://github.com/ClickHouse/clickhouse-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"ClickHouse C++ API\"\n license = \"Apache-2.0\"\n topics = (\"database\", \"db\", \"clickhouse\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_benchmark\": [True, False],\n \"with_openssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_benchmark\": False,\n \"with_openssl\": False\n }\n\n def requirements(self):\n\n self.requires(\"lz4/1.9.4\")\n\n self.requires(\"abseil/20230125.3\", transitive_headers=True)\n\n self.requires(\"cityhash/cci.20130801\")\n if self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def build_requirements(self):\n if self.options.enable_benchmark:\n self.requires(\"benchmark/1.8.0\")\n\n @property\n def _min_cppstd(self):\n return \"17\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n \"gcc\": \"7\",\n \"clang\": \"6\",\n }\n\n @property\n def _requires_compiler_rt(self):\n return self.settings.compiler == \"clang\" and (( self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and not self.options.shared) or self.settings.compiler.libcxx == \"libc++\" )\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(f\"{self.ref} requires C++17, which your compiler does not support.\")\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"f{self.ref} does not support shared library on Windows.\")\n # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_BENCHMARK\"] = self.options.enable_benchmark\n tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.cache_variables[\"WITH_SYSTEM_ABSEIL\"] = True\n tc.cache_variables[\"WITH_SYSTEM_LZ4\"] = True\n tc.cache_variables[\"WITH_SYSTEM_CITYHASH\"] = True\n tc.generate()\n\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs.append(\"clickhouse-cpp-lib\")\n 
self.cpp_info.set_property(\"cmake_target_name\", \"clickhouse-cpp-lib::clickhouse-cpp-lib\")\n\n if self._requires_compiler_rt:\n ldflags = [\"--rtlib=compiler-rt\"]\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.system_libs.append(\"gcc_s\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"clickhouse-cpp\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"clickhouse-cpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"clickhouse-cpp-lib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"clickhouse-cpp-lib\"\n\n if self.settings.os == 'Windows':\n self.cpp_info.system_libs = ['ws2_32', 'wsock32']\n", "path": "recipes/clickhouse-cpp/all/conanfile.py" } ]
[ { "content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.build import check_min_cppstd\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\nclass ClickHouseCppConan(ConanFile):\n name = \"clickhouse-cpp\"\n homepage = \"https://github.com/ClickHouse/clickhouse-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"ClickHouse C++ API\"\n license = \"Apache-2.0\"\n topics = (\"database\", \"db\", \"clickhouse\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_benchmark\": [True, False],\n \"with_openssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_benchmark\": False,\n \"with_openssl\": False\n }\n\n def requirements(self):\n\n self.requires(\"lz4/1.9.4\")\n\n self.requires(\"abseil/20230125.3\", transitive_headers=True)\n\n self.requires(\"cityhash/cci.20130801\")\n if self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def build_requirements(self):\n if self.options.enable_benchmark:\n self.requires(\"benchmark/1.8.0\")\n\n @property\n def _min_cppstd(self):\n return \"17\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n \"gcc\": \"7\",\n \"clang\": \"6\",\n }\n\n @property\n def _requires_compiler_rt(self):\n return self.settings.compiler == \"clang\" and (( self.settings.compiler.libcxx in [\"libstdc++\", \"libstdc++11\"] and not self.options.shared) or self.settings.compiler.libcxx == \"libc++\" )\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(f\"{self.ref} requires C++17, which your compiler does not support.\")\n if self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"f{self.ref} does not support shared library on Windows.\")\n # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"BUILD_BENCHMARK\"] = self.options.enable_benchmark\n tc.cache_variables[\"BUILD_SHARED_LIBS\"] = self.options.shared\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.cache_variables[\"WITH_SYSTEM_ABSEIL\"] = True\n tc.cache_variables[\"WITH_SYSTEM_LZ4\"] = True\n tc.cache_variables[\"WITH_SYSTEM_CITYHASH\"] = True\n tc.generate()\n\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n 
self.cpp_info.libs.append(\"clickhouse-cpp-lib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"clickhouse-cpp-lib::clickhouse-cpp-lib\")\n\n if self._requires_compiler_rt:\n ldflags = [\"--rtlib=compiler-rt\"]\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.system_libs.append(\"gcc_s\")\n\n self.cpp_info.filenames[\"cmake_find_package\"] = \"clickhouse-cpp\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"clickhouse-cpp\"\n self.cpp_info.names[\"cmake_find_package\"] = \"clickhouse-cpp-lib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"clickhouse-cpp-lib\"\n\n if self.settings.os == 'Windows':\n self.cpp_info.system_libs = ['ws2_32', 'wsock32']\n", "path": "recipes/clickhouse-cpp/all/conanfile.py" } ]
diff --git a/recipes/clickhouse-cpp/all/conanfile.py b/recipes/clickhouse-cpp/all/conanfile.py
index 9586c0240cbc1..442d24123399e 100644
--- a/recipes/clickhouse-cpp/all/conanfile.py
+++ b/recipes/clickhouse-cpp/all/conanfile.py
@@ -75,7 +75,8 @@ def config_options(self):
             del self.options.fPIC
 
     def configure(self):
-        self.options.rm_safe("fPIC")
+        if self.options.shared:
+            self.options.rm_safe("fPIC")
 
     def layout(self):
         cmake_layout(self, src_folder="src")
pyscript__pyscript-1064
Python Plugin Methods are Executed Twice

The Python plugin methods appear to be getting called twice each. To recreate, run `make test-integration ARGS='-k test_execution_hooks --headed'` and look at the console log. You'll see both `afterSetup` and `afterStartup` are logged twice. (The test passes because it checks that each is present at least once.)

This is not just in testing - if you load the PyMarkdown plugin and look at the dev console, you'll see `runtime Received: [object Object]` present twice.

Tested on unstable, on Chromium.
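A stripped-down sketch of how this kind of double execution can arise is shown below. The `Registry` and `Plugin` classes are illustrative, not PyScript's actual plugin machinery: if a plugin instance registers itself during its own `init()` while the host application also registers the instance it just initialised, every lifecycle hook ends up being invoked once per registration.

```python
class Registry:
    """Illustrative plugin registry: runs each lifecycle hook on every entry."""

    def __init__(self):
        self.plugins = []

    def add(self, plugin):
        self.plugins.append(plugin)

    def run_hook(self, name):
        for plugin in self.plugins:
            getattr(plugin, name)()


class Plugin:
    def init(self, registry):
        # Self-registration inside init() is one source of the duplicate.
        registry.add(self)

    def afterStartup(self):
        print("afterStartup called")


registry = Registry()
plugin = Plugin()
plugin.init(registry)              # registration #1 (done by the plugin itself)
registry.add(plugin)               # registration #2 (done by the host)
registry.run_hook("afterStartup")  # prints "afterStartup called" twice
```

Running the script prints `afterStartup called` twice, mirroring the duplicated log lines described above; removing either of the two registrations brings it back to one.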
[ { "content": "import ast\nimport asyncio\nimport base64\nimport html\nimport io\nimport re\nimport time\nfrom collections import namedtuple\nfrom textwrap import dedent\n\nimport js\n\ntry:\n from pyodide import create_proxy\nexcept ImportError:\n from pyodide.ffi import create_proxy\n\nloop = asyncio.get_event_loop()\n\nMIME_METHODS = {\n \"__repr__\": \"text/plain\",\n \"_repr_html_\": \"text/html\",\n \"_repr_markdown_\": \"text/markdown\",\n \"_repr_svg_\": \"image/svg+xml\",\n \"_repr_png_\": \"image/png\",\n \"_repr_pdf_\": \"application/pdf\",\n \"_repr_jpeg_\": \"image/jpeg\",\n \"_repr_latex\": \"text/latex\",\n \"_repr_json_\": \"application/json\",\n \"_repr_javascript_\": \"application/javascript\",\n \"savefig\": \"image/png\",\n}\n\n\ndef render_image(mime, value, meta):\n # If the image value is using bytes we should convert it to base64\n # otherwise it will return raw bytes and the browser will not be able to\n # render it.\n if isinstance(value, bytes):\n value = base64.b64encode(value).decode(\"utf-8\")\n\n # This is the pattern of base64 strings\n base64_pattern = re.compile(\n r\"^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$\"\n )\n # If value doesn't match the base64 pattern we should encode it to base64\n if len(value) > 0 and not base64_pattern.match(value):\n value = base64.b64encode(value.encode(\"utf-8\")).decode(\"utf-8\")\n\n data = f\"data:{mime};charset=utf-8;base64,{value}\"\n attrs = \" \".join(['{k}=\"{v}\"' for k, v in meta.items()])\n return f'<img src=\"{data}\" {attrs}></img>'\n\n\ndef identity(value, meta):\n return value\n\n\nMIME_RENDERERS = {\n \"text/plain\": html.escape,\n \"text/html\": identity,\n \"image/png\": lambda value, meta: render_image(\"image/png\", value, meta),\n \"image/jpeg\": lambda value, meta: render_image(\"image/jpeg\", value, meta),\n \"image/svg+xml\": identity,\n \"application/json\": identity,\n \"application/javascript\": lambda value, meta: f\"<script>{value}</script>\",\n}\n\n\n# these are set by _set_version_info\n__version__ = None\nversion_info = None\n\n\ndef _set_version_info(version_from_runtime: str):\n \"\"\"Sets the __version__ and version_info properties from provided JSON data\n Args:\n version_from_runtime (str): A \"dotted\" representation of the version:\n YYYY.MM.m(m).releaselevel\n Year, Month, and Minor should be integers; releaselevel can be any string\n \"\"\"\n global __version__\n global version_info\n\n __version__ = version_from_runtime\n\n version_parts = version_from_runtime.split(\".\")\n year = int(version_parts[0])\n month = int(version_parts[1])\n minor = int(version_parts[2])\n if len(version_parts) > 3:\n releaselevel = version_parts[3]\n else:\n releaselevel = \"\"\n\n VersionInfo = namedtuple(\"version_info\", (\"year\", \"month\", \"minor\", \"releaselevel\"))\n version_info = VersionInfo(year, month, minor, releaselevel)\n\n # we ALSO set PyScript.__version__ and version_info for backwards\n # compatibility. 
Should be killed eventually.\n PyScript.__version__ = __version__\n PyScript.version_info = version_info\n\n\nclass HTML:\n \"\"\"\n Wrap a string so that display() can render it as plain HTML\n \"\"\"\n\n def __init__(self, html):\n self._html = html\n\n def _repr_html_(self):\n return self._html\n\n\ndef eval_formatter(obj, print_method):\n \"\"\"\n Evaluates a formatter method.\n \"\"\"\n if print_method == \"__repr__\":\n return repr(obj)\n elif hasattr(obj, print_method):\n if print_method == \"savefig\":\n buf = io.BytesIO()\n obj.savefig(buf, format=\"png\")\n buf.seek(0)\n return base64.b64encode(buf.read()).decode(\"utf-8\")\n return getattr(obj, print_method)()\n elif print_method == \"_repr_mimebundle_\":\n return {}, {}\n return None\n\n\ndef format_mime(obj):\n \"\"\"\n Formats object using _repr_x_ methods.\n \"\"\"\n if isinstance(obj, str):\n return html.escape(obj), \"text/plain\"\n\n mimebundle = eval_formatter(obj, \"_repr_mimebundle_\")\n if isinstance(mimebundle, tuple):\n format_dict, _ = mimebundle\n else:\n format_dict = mimebundle\n\n output, not_available = None, []\n for method, mime_type in reversed(MIME_METHODS.items()):\n if mime_type in format_dict:\n output = format_dict[mime_type]\n else:\n output = eval_formatter(obj, method)\n\n if output is None:\n continue\n elif mime_type not in MIME_RENDERERS:\n not_available.append(mime_type)\n continue\n break\n if output is None:\n if not_available:\n js.console.warn(\n f\"Rendered object requested unavailable MIME renderers: {not_available}\"\n )\n output = repr(output)\n mime_type = \"text/plain\"\n elif isinstance(output, tuple):\n output, meta = output\n else:\n meta = {}\n return MIME_RENDERERS[mime_type](output, meta), mime_type\n\n\n@staticmethod\ndef run_until_complete(f):\n _ = loop.run_until_complete(f)\n\n\n@staticmethod\ndef write(element_id, value, append=False, exec_id=0):\n \"\"\"Writes value to the element with id \"element_id\"\"\"\n Element(element_id).write(value=value, append=append)\n js.console.warn(\n dedent(\n \"\"\"PyScript Deprecation Warning: PyScript.write is\n marked as deprecated and will be removed sometime soon. Please, use\n Element(<id>).write instead.\"\"\"\n )\n )\n\n\ndef set_current_display_target(target_id):\n get_current_display_target._id = target_id\n\n\ndef get_current_display_target():\n return get_current_display_target._id\n\n\nget_current_display_target._id = None\n\n\ndef display(*values, target=None, append=True):\n default_target = get_current_display_target()\n\n if default_target is None and target is None:\n raise Exception(\n \"Implicit target not allowed here. 
Please use display(..., target=...)\"\n )\n\n if target is not None:\n for v in values:\n Element(target).write(v, append=append)\n else:\n for v in values:\n Element(default_target).write(v, append=append)\n\n\nclass Element:\n def __init__(self, element_id, element=None):\n self._id = element_id\n self._element = element\n\n @property\n def id(self):\n return self._id\n\n @property\n def element(self):\n \"\"\"Return the dom element\"\"\"\n if not self._element:\n self._element = js.document.querySelector(f\"#{self._id}\")\n return self._element\n\n @property\n def value(self):\n return self.element.value\n\n @property\n def innerHtml(self):\n return self.element.innerHTML\n\n def write(self, value, append=False):\n html, mime_type = format_mime(value)\n if html == \"\\n\":\n return\n\n if append:\n child = js.document.createElement(\"div\")\n self.element.appendChild(child)\n\n if self.element.children:\n out_element = self.element.children[-1]\n else:\n out_element = self.element\n\n if mime_type in (\"application/javascript\", \"text/html\"):\n script_element = js.document.createRange().createContextualFragment(html)\n out_element.appendChild(script_element)\n else:\n out_element.innerHTML = html\n\n def clear(self):\n if hasattr(self.element, \"value\"):\n self.element.value = \"\"\n else:\n self.write(\"\", append=False)\n\n def select(self, query, from_content=False):\n el = self.element\n\n if from_content:\n el = el.content\n\n _el = el.querySelector(query)\n if _el:\n return Element(_el.id, _el)\n else:\n js.console.warn(f\"WARNING: can't find element matching query {query}\")\n\n def clone(self, new_id=None, to=None):\n if new_id is None:\n new_id = self.element.id\n\n clone = self.element.cloneNode(True)\n clone.id = new_id\n\n if to:\n to.element.appendChild(clone)\n # Inject it into the DOM\n to.element.after(clone)\n else:\n # Inject it into the DOM\n self.element.after(clone)\n\n return Element(clone.id, clone)\n\n def remove_class(self, classname):\n if isinstance(classname, list):\n for cl in classname:\n self.remove_class(cl)\n else:\n self.element.classList.remove(classname)\n\n def add_class(self, classname):\n if isinstance(classname, list):\n for cl in classname:\n self.element.classList.add(cl)\n else:\n self.element.classList.add(classname)\n\n\ndef add_classes(element, class_list):\n for klass in class_list.split(\" \"):\n element.classList.add(klass)\n\n\ndef create(what, id_=None, classes=\"\"):\n element = js.document.createElement(what)\n if id_:\n element.id = id_\n add_classes(element, classes)\n return Element(id_, element)\n\n\nclass PyWidgetTheme:\n def __init__(self, main_style_classes):\n self.main_style_classes = main_style_classes\n\n def theme_it(self, widget):\n for klass in self.main_style_classes.split(\" \"):\n widget.classList.add(klass)\n\n\nclass PyItemTemplate(Element):\n label_fields = None\n\n def __init__(self, data, labels=None, state_key=None, parent=None):\n self.data = data\n\n self.register_parent(parent)\n\n if not labels:\n labels = list(self.data.keys())\n self.labels = labels\n\n self.state_key = state_key\n\n super().__init__(self._id)\n\n def register_parent(self, parent):\n self._parent = parent\n if parent:\n self._id = f\"{self._parent._id}-c-{len(self._parent._children)}\"\n self.data[\"id\"] = self._id\n else:\n self._id = None\n\n def create(self):\n new_child = create(\"div\", self._id, \"py-li-element\")\n new_child._element.innerHTML = dedent(\n f\"\"\"\n <label id=\"{self._id}\" for=\"flex items-center p-2 \">\n 
<input class=\"mr-2\" type=\"checkbox\" class=\"task-check\">\n <p>{self.render_content()}</p>\n </label>\n \"\"\"\n )\n return new_child\n\n def on_click(self, evt):\n pass\n\n def pre_append(self):\n pass\n\n def post_append(self):\n self.element.click = self.on_click\n self.element.onclick = self.on_click\n\n self._post_append()\n\n def _post_append(self):\n pass\n\n def strike(self, value, extra=None):\n if value:\n self.add_class(\"line-through\")\n else:\n self.remove_class(\"line-through\")\n\n def render_content(self):\n return \" - \".join([self.data[f] for f in self.labels])\n\n\nclass PyListTemplate:\n theme = PyWidgetTheme(\"py-li-element\")\n item_class = PyItemTemplate\n\n def __init__(self, parent):\n self.parent = parent\n self._children = []\n self._id = self.parent.id\n\n @property\n def children(self):\n return self._children\n\n @property\n def data(self):\n return [c.data for c in self._children]\n\n def render_children(self):\n binds = {}\n for i, c in enumerate(self._children):\n txt = c.element.innerHTML\n rnd = str(time.time()).replace(\".\", \"\")[-5:]\n new_id = f\"{c.element.id}-{i}-{rnd}\"\n binds[new_id] = c.element.id\n txt = txt.replace(\">\", f\" id='{new_id}'>\")\n print(txt)\n\n def foo(evt):\n evtEl = evt.srcElement\n srcEl = Element(binds[evtEl.id])\n srcEl.element.onclick()\n evtEl.classList = srcEl.element.classList\n\n for new_id in binds:\n Element(new_id).element.onclick = foo\n\n def connect(self):\n self.md = main_div = js.document.createElement(\"div\")\n main_div.id = self._id + \"-list-tasks-container\"\n\n if self.theme:\n self.theme.theme_it(main_div)\n\n self.parent.appendChild(main_div)\n\n def add(self, *args, **kws):\n if not isinstance(args[0], self.item_class):\n child = self.item_class(*args, **kws)\n else:\n child = args[0]\n child.register_parent(self)\n return self._add(child)\n\n def _add(self, child_elem):\n self.pre_child_append(child_elem)\n child_elem.pre_append()\n self._children.append(child_elem)\n self.md.appendChild(child_elem.create().element)\n child_elem.post_append()\n self.child_appended(child_elem)\n return child_elem\n\n def pre_child_append(self, child):\n pass\n\n def child_appended(self, child):\n \"\"\"Overwrite me to define logic\"\"\"\n pass\n\n\nclass TopLevelAsyncFinder(ast.NodeVisitor):\n def is_source_top_level_await(self, source):\n self.async_found = False\n node = ast.parse(source)\n self.generic_visit(node)\n return self.async_found\n\n def visit_Await(self, node):\n self.async_found = True\n\n def visit_AsyncFor(self, node):\n self.async_found = True\n\n def visit_AsyncWith(self, node):\n self.async_found = True\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):\n pass # Do not visit children of async function defs\n\n\ndef uses_top_level_await(source: str) -> bool:\n return TopLevelAsyncFinder().is_source_top_level_await(source)\n\n\nclass Plugin:\n def __init__(self, name=None):\n if not name:\n name = self.__class__.__name__\n\n self.name = name\n\n def init(self, app):\n self.app = app\n self.app.plugins.addPythonPlugin(create_proxy(self))\n\n def register_custom_element(self, tag):\n # TODO: Ideally would be better to use the logger.\n js.console.info(f\"Defining new custom element {tag}\")\n\n def wrapper(class_):\n # TODO: this is very pyodide specific but will have to do\n # until we have JS interface that works across interpreters\n define_custom_element(tag, create_proxy(class_)) # noqa: F821\n\n return create_proxy(wrapper)\n\n\nclass DeprecatedGlobal:\n \"\"\"\n Proxy for 
globals which are deprecated.\n\n The intendend usage is as follows:\n\n # in the global namespace\n Element = pyscript.DeprecatedGlobal('Element', pyscript.Element, \"...\")\n console = pyscript.DeprecatedGlobal('console', js.console, \"...\")\n ...\n\n The proxy forwards __getattr__ and __call__ to the underlying object, and\n emit a warning on the first usage.\n\n This way users see a warning only if they actually access the top-level\n name.\n \"\"\"\n\n def __init__(self, name, obj, message):\n self.__name = name\n self.__obj = obj\n self.__message = message\n self.__warning_already_shown = False\n\n def __repr__(self):\n return f\"<DeprecatedGlobal({self.__name!r})>\"\n\n def _show_warning(self, message):\n \"\"\"\n NOTE: this is overridden by unit tests\n \"\"\"\n # this showWarning is implemented in js and injected into this\n # namespace by main.ts\n showWarning(message, \"html\") # noqa: F821\n\n def _show_warning_maybe(self):\n if self.__warning_already_shown:\n return\n self._show_warning(self.__message)\n self.__warning_already_shown = True\n\n def __getattr__(self, attr):\n self._show_warning_maybe()\n return getattr(self.__obj, attr)\n\n def __call__(self, *args, **kwargs):\n self._show_warning_maybe()\n return self.__obj(*args, **kwargs)\n\n def __iter__(self):\n self._show_warning_maybe()\n return iter(self.__obj)\n\n def __getitem__(self, key):\n self._show_warning_maybe()\n return self.__obj[key]\n\n def __setitem__(self, key, value):\n self._show_warning_maybe()\n self.__obj[key] = value\n\n\nclass PyScript:\n \"\"\"\n This class is deprecated since 2022.12.1.\n\n All its old functionalities are available as module-level functions. This\n class should be killed eventually.\n \"\"\"\n\n loop = loop\n\n @staticmethod\n def run_until_complete(f):\n run_until_complete(f)\n\n @staticmethod\n def write(element_id, value, append=False, exec_id=0):\n write(element_id, value, append, exec_id)\n\n\ndef _install_deprecated_globals_2022_12_1(ns):\n \"\"\"\n Install into the given namespace all the globals which have been\n deprecated since the 2022.12.1 release. Eventually they should be killed.\n \"\"\"\n\n def deprecate(name, obj, instead):\n message = f\"Direct usage of <code>{name}</code> is deprecated. \" + instead\n ns[name] = DeprecatedGlobal(name, obj, message)\n\n # function/classes defined in pyscript.py ===> pyscript.XXX\n pyscript_names = [\n \"PyItemTemplate\",\n \"PyListTemplate\",\n \"PyWidgetTheme\",\n \"add_classes\",\n \"create\",\n \"loop\",\n ]\n for name in pyscript_names:\n deprecate(\n name, globals()[name], f\"Please use <code>pyscript.{name}</code> instead.\"\n )\n\n # stdlib modules ===> import XXX\n stdlib_names = [\n \"asyncio\",\n \"base64\",\n \"io\",\n \"sys\",\n \"time\",\n \"datetime\",\n \"pyodide\",\n \"micropip\",\n ]\n for name in stdlib_names:\n obj = __import__(name)\n deprecate(name, obj, f\"Please use <code>import {name}</code> instead.\")\n\n # special case\n deprecate(\n \"dedent\", dedent, \"Please use <code>from textwrap import dedent</code> instead.\"\n )\n\n # these are names that used to leak in the globals but they are just\n # implementation details. People should not use them.\n private_names = [\n \"eval_formatter\",\n \"format_mime\",\n \"identity\",\n \"render_image\",\n \"MIME_RENDERERS\",\n \"MIME_METHODS\",\n ]\n for name in private_names:\n obj = globals()[name]\n message = (\n f\"<code>{name}</code> is deprecated. \"\n \"This is a private implementation detail of pyscript. 
\"\n \"You should not use it.\"\n )\n ns[name] = DeprecatedGlobal(name, obj, message)\n\n # these names are available as js.XXX\n for name in [\"document\", \"console\"]:\n obj = getattr(js, name)\n deprecate(name, obj, f\"Please use <code>js.{name}</code> instead.\")\n\n # PyScript is special, use a different message\n message = (\n \"The <code>PyScript</code> object is deprecated. \"\n \"Please use <code>pyscript</code> instead.\"\n )\n ns[\"PyScript\"] = DeprecatedGlobal(\"PyScript\", PyScript, message)\n", "path": "pyscriptjs/src/python/pyscript.py" } ]
[ { "content": "import ast\nimport asyncio\nimport base64\nimport html\nimport io\nimport re\nimport time\nfrom collections import namedtuple\nfrom textwrap import dedent\n\nimport js\n\ntry:\n from pyodide import create_proxy\nexcept ImportError:\n from pyodide.ffi import create_proxy\n\nloop = asyncio.get_event_loop()\n\nMIME_METHODS = {\n \"__repr__\": \"text/plain\",\n \"_repr_html_\": \"text/html\",\n \"_repr_markdown_\": \"text/markdown\",\n \"_repr_svg_\": \"image/svg+xml\",\n \"_repr_png_\": \"image/png\",\n \"_repr_pdf_\": \"application/pdf\",\n \"_repr_jpeg_\": \"image/jpeg\",\n \"_repr_latex\": \"text/latex\",\n \"_repr_json_\": \"application/json\",\n \"_repr_javascript_\": \"application/javascript\",\n \"savefig\": \"image/png\",\n}\n\n\ndef render_image(mime, value, meta):\n # If the image value is using bytes we should convert it to base64\n # otherwise it will return raw bytes and the browser will not be able to\n # render it.\n if isinstance(value, bytes):\n value = base64.b64encode(value).decode(\"utf-8\")\n\n # This is the pattern of base64 strings\n base64_pattern = re.compile(\n r\"^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$\"\n )\n # If value doesn't match the base64 pattern we should encode it to base64\n if len(value) > 0 and not base64_pattern.match(value):\n value = base64.b64encode(value.encode(\"utf-8\")).decode(\"utf-8\")\n\n data = f\"data:{mime};charset=utf-8;base64,{value}\"\n attrs = \" \".join(['{k}=\"{v}\"' for k, v in meta.items()])\n return f'<img src=\"{data}\" {attrs}</img>'\n\n\ndef identity(value, meta):\n return value\n\n\nMIME_RENDERERS = {\n \"text/plain\": html.escape,\n \"text/html\": identity,\n \"image/png\": lambda value, meta: render_image(\"image/png\", value, meta),\n \"image/jpeg\": lambda value, meta: render_image(\"image/jpeg\", value, meta),\n \"image/svg+xml\": identity,\n \"application/json\": identity,\n \"application/javascript\": lambda value, meta: f\"<script>{value}</script>\",\n}\n\n\n# these are set by _set_version_info\n__version__ = None\nversion_info = None\n\n\ndef _set_version_info(version_from_runtime: str):\n \"\"\"Sets the __version__ and version_info properties from provided JSON data\n Args:\n version_from_runtime (str): A \"dotted\" representation of the version:\n YYYY.MM.m(m).releaselevel\n Year, Month, and Minor should be integers; releaselevel can be any string\n \"\"\"\n global __version__\n global version_info\n\n __version__ = version_from_runtime\n\n version_parts = version_from_runtime.split(\".\")\n year = int(version_parts[0])\n month = int(version_parts[1])\n minor = int(version_parts[2])\n if len(version_parts) > 3:\n releaselevel = version_parts[3]\n else:\n releaselevel = \"\"\n\n VersionInfo = namedtuple(\"version_info\", (\"year\", \"month\", \"minor\", \"releaselevel\"))\n version_info = VersionInfo(year, month, minor, releaselevel)\n\n # we ALSO set PyScript.__version__ and version_info for backwards\n # compatibility. 
Should be killed eventually.\n PyScript.__version__ = __version__\n PyScript.version_info = version_info\n\n\nclass HTML:\n \"\"\"\n Wrap a string so that display() can render it as plain HTML\n \"\"\"\n\n def __init__(self, html):\n self._html = html\n\n def _repr_html_(self):\n return self._html\n\n\ndef eval_formatter(obj, print_method):\n \"\"\"\n Evaluates a formatter method.\n \"\"\"\n if print_method == \"__repr__\":\n return repr(obj)\n elif hasattr(obj, print_method):\n if print_method == \"savefig\":\n buf = io.BytesIO()\n obj.savefig(buf, format=\"png\")\n buf.seek(0)\n return base64.b64encode(buf.read()).decode(\"utf-8\")\n return getattr(obj, print_method)()\n elif print_method == \"_repr_mimebundle_\":\n return {}, {}\n return None\n\n\ndef format_mime(obj):\n \"\"\"\n Formats object using _repr_x_ methods.\n \"\"\"\n if isinstance(obj, str):\n return html.escape(obj), \"text/plain\"\n\n mimebundle = eval_formatter(obj, \"_repr_mimebundle_\")\n if isinstance(mimebundle, tuple):\n format_dict, _ = mimebundle\n else:\n format_dict = mimebundle\n\n output, not_available = None, []\n for method, mime_type in reversed(MIME_METHODS.items()):\n if mime_type in format_dict:\n output = format_dict[mime_type]\n else:\n output = eval_formatter(obj, method)\n\n if output is None:\n continue\n elif mime_type not in MIME_RENDERERS:\n not_available.append(mime_type)\n continue\n break\n if output is None:\n if not_available:\n js.console.warn(\n f\"Rendered object requested unavailable MIME renderers: {not_available}\"\n )\n output = repr(output)\n mime_type = \"text/plain\"\n elif isinstance(output, tuple):\n output, meta = output\n else:\n meta = {}\n return MIME_RENDERERS[mime_type](output, meta), mime_type\n\n\n@staticmethod\ndef run_until_complete(f):\n _ = loop.run_until_complete(f)\n\n\n@staticmethod\ndef write(element_id, value, append=False, exec_id=0):\n \"\"\"Writes value to the element with id \"element_id\"\"\"\n Element(element_id).write(value=value, append=append)\n js.console.warn(\n dedent(\n \"\"\"PyScript Deprecation Warning: PyScript.write is\n marked as deprecated and will be removed sometime soon. Please, use\n Element(<id>).write instead.\"\"\"\n )\n )\n\n\ndef set_current_display_target(target_id):\n get_current_display_target._id = target_id\n\n\ndef get_current_display_target():\n return get_current_display_target._id\n\n\nget_current_display_target._id = None\n\n\ndef display(*values, target=None, append=True):\n default_target = get_current_display_target()\n\n if default_target is None and target is None:\n raise Exception(\n \"Implicit target not allowed here. 
Please use display(..., target=...)\"\n )\n\n if target is not None:\n for v in values:\n Element(target).write(v, append=append)\n else:\n for v in values:\n Element(default_target).write(v, append=append)\n\n\nclass Element:\n def __init__(self, element_id, element=None):\n self._id = element_id\n self._element = element\n\n @property\n def id(self):\n return self._id\n\n @property\n def element(self):\n \"\"\"Return the dom element\"\"\"\n if not self._element:\n self._element = js.document.querySelector(f\"#{self._id}\")\n return self._element\n\n @property\n def value(self):\n return self.element.value\n\n @property\n def innerHtml(self):\n return self.element.innerHTML\n\n def write(self, value, append=False):\n html, mime_type = format_mime(value)\n if html == \"\\n\":\n return\n\n if append:\n child = js.document.createElement(\"div\")\n self.element.appendChild(child)\n\n if self.element.children:\n out_element = self.element.children[-1]\n else:\n out_element = self.element\n\n if mime_type in (\"application/javascript\", \"text/html\"):\n script_element = js.document.createRange().createContextualFragment(html)\n out_element.appendChild(script_element)\n else:\n out_element.innerHTML = html\n\n def clear(self):\n if hasattr(self.element, \"value\"):\n self.element.value = \"\"\n else:\n self.write(\"\", append=False)\n\n def select(self, query, from_content=False):\n el = self.element\n\n if from_content:\n el = el.content\n\n _el = el.querySelector(query)\n if _el:\n return Element(_el.id, _el)\n else:\n js.console.warn(f\"WARNING: can't find element matching query {query}\")\n\n def clone(self, new_id=None, to=None):\n if new_id is None:\n new_id = self.element.id\n\n clone = self.element.cloneNode(True)\n clone.id = new_id\n\n if to:\n to.element.appendChild(clone)\n # Inject it into the DOM\n to.element.after(clone)\n else:\n # Inject it into the DOM\n self.element.after(clone)\n\n return Element(clone.id, clone)\n\n def remove_class(self, classname):\n if isinstance(classname, list):\n for cl in classname:\n self.remove_class(cl)\n else:\n self.element.classList.remove(classname)\n\n def add_class(self, classname):\n if isinstance(classname, list):\n for cl in classname:\n self.element.classList.add(cl)\n else:\n self.element.classList.add(classname)\n\n\ndef add_classes(element, class_list):\n for klass in class_list.split(\" \"):\n element.classList.add(klass)\n\n\ndef create(what, id_=None, classes=\"\"):\n element = js.document.createElement(what)\n if id_:\n element.id = id_\n add_classes(element, classes)\n return Element(id_, element)\n\n\nclass PyWidgetTheme:\n def __init__(self, main_style_classes):\n self.main_style_classes = main_style_classes\n\n def theme_it(self, widget):\n for klass in self.main_style_classes.split(\" \"):\n widget.classList.add(klass)\n\n\nclass PyItemTemplate(Element):\n label_fields = None\n\n def __init__(self, data, labels=None, state_key=None, parent=None):\n self.data = data\n\n self.register_parent(parent)\n\n if not labels:\n labels = list(self.data.keys())\n self.labels = labels\n\n self.state_key = state_key\n\n super().__init__(self._id)\n\n def register_parent(self, parent):\n self._parent = parent\n if parent:\n self._id = f\"{self._parent._id}-c-{len(self._parent._children)}\"\n self.data[\"id\"] = self._id\n else:\n self._id = None\n\n def create(self):\n new_child = create(\"div\", self._id, \"py-li-element\")\n new_child._element.innerHTML = dedent(\n f\"\"\"\n <label id=\"{self._id}\" for=\"flex items-center p-2 \">\n 
<input class=\"mr-2\" type=\"checkbox\" class=\"task-check\">\n <p>{self.render_content()}</p>\n </label>\n \"\"\"\n )\n return new_child\n\n def on_click(self, evt):\n pass\n\n def pre_append(self):\n pass\n\n def post_append(self):\n self.element.click = self.on_click\n self.element.onclick = self.on_click\n\n self._post_append()\n\n def _post_append(self):\n pass\n\n def strike(self, value, extra=None):\n if value:\n self.add_class(\"line-through\")\n else:\n self.remove_class(\"line-through\")\n\n def render_content(self):\n return \" - \".join([self.data[f] for f in self.labels])\n\n\nclass PyListTemplate:\n theme = PyWidgetTheme(\"py-li-element\")\n item_class = PyItemTemplate\n\n def __init__(self, parent):\n self.parent = parent\n self._children = []\n self._id = self.parent.id\n\n @property\n def children(self):\n return self._children\n\n @property\n def data(self):\n return [c.data for c in self._children]\n\n def render_children(self):\n binds = {}\n for i, c in enumerate(self._children):\n txt = c.element.innerHTML\n rnd = str(time.time()).replace(\".\", \"\")[-5:]\n new_id = f\"{c.element.id}-{i}-{rnd}\"\n binds[new_id] = c.element.id\n txt = txt.replace(\">\", f\" id='{new_id}'>\")\n print(txt)\n\n def foo(evt):\n evtEl = evt.srcElement\n srcEl = Element(binds[evtEl.id])\n srcEl.element.onclick()\n evtEl.classList = srcEl.element.classList\n\n for new_id in binds:\n Element(new_id).element.onclick = foo\n\n def connect(self):\n self.md = main_div = js.document.createElement(\"div\")\n main_div.id = self._id + \"-list-tasks-container\"\n\n if self.theme:\n self.theme.theme_it(main_div)\n\n self.parent.appendChild(main_div)\n\n def add(self, *args, **kws):\n if not isinstance(args[0], self.item_class):\n child = self.item_class(*args, **kws)\n else:\n child = args[0]\n child.register_parent(self)\n return self._add(child)\n\n def _add(self, child_elem):\n self.pre_child_append(child_elem)\n child_elem.pre_append()\n self._children.append(child_elem)\n self.md.appendChild(child_elem.create().element)\n child_elem.post_append()\n self.child_appended(child_elem)\n return child_elem\n\n def pre_child_append(self, child):\n pass\n\n def child_appended(self, child):\n \"\"\"Overwrite me to define logic\"\"\"\n pass\n\n\nclass TopLevelAsyncFinder(ast.NodeVisitor):\n def is_source_top_level_await(self, source):\n self.async_found = False\n node = ast.parse(source)\n self.generic_visit(node)\n return self.async_found\n\n def visit_Await(self, node):\n self.async_found = True\n\n def visit_AsyncFor(self, node):\n self.async_found = True\n\n def visit_AsyncWith(self, node):\n self.async_found = True\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):\n pass # Do not visit children of async function defs\n\n\ndef uses_top_level_await(source: str) -> bool:\n return TopLevelAsyncFinder().is_source_top_level_await(source)\n\n\nclass Plugin:\n def __init__(self, name=None):\n if not name:\n name = self.__class__.__name__\n\n self.name = name\n\n def init(self, app):\n self.app = app\n\n def register_custom_element(self, tag):\n # TODO: Ideally would be better to use the logger.\n js.console.info(f\"Defining new custom element {tag}\")\n\n def wrapper(class_):\n # TODO: this is very pyodide specific but will have to do\n # until we have JS interface that works across interpreters\n define_custom_element(tag, create_proxy(class_)) # noqa: F821\n\n return create_proxy(wrapper)\n\n\nclass DeprecatedGlobal:\n \"\"\"\n Proxy for globals which are deprecated.\n\n The intendend usage is 
as follows:\n\n # in the global namespace\n Element = pyscript.DeprecatedGlobal('Element', pyscript.Element, \"...\")\n console = pyscript.DeprecatedGlobal('console', js.console, \"...\")\n ...\n\n The proxy forwards __getattr__ and __call__ to the underlying object, and\n emit a warning on the first usage.\n\n This way users see a warning only if they actually access the top-level\n name.\n \"\"\"\n\n def __init__(self, name, obj, message):\n self.__name = name\n self.__obj = obj\n self.__message = message\n self.__warning_already_shown = False\n\n def __repr__(self):\n return f\"<DeprecatedGlobal({self.__name!r})>\"\n\n def _show_warning(self, message):\n \"\"\"\n NOTE: this is overridden by unit tests\n \"\"\"\n # this showWarning is implemented in js and injected into this\n # namespace by main.ts\n showWarning(message, \"html\") # noqa: F821\n\n def _show_warning_maybe(self):\n if self.__warning_already_shown:\n return\n self._show_warning(self.__message)\n self.__warning_already_shown = True\n\n def __getattr__(self, attr):\n self._show_warning_maybe()\n return getattr(self.__obj, attr)\n\n def __call__(self, *args, **kwargs):\n self._show_warning_maybe()\n return self.__obj(*args, **kwargs)\n\n def __iter__(self):\n self._show_warning_maybe()\n return iter(self.__obj)\n\n def __getitem__(self, key):\n self._show_warning_maybe()\n return self.__obj[key]\n\n def __setitem__(self, key, value):\n self._show_warning_maybe()\n self.__obj[key] = value\n\n\nclass PyScript:\n \"\"\"\n This class is deprecated since 2022.12.1.\n\n All its old functionalities are available as module-level functions. This\n class should be killed eventually.\n \"\"\"\n\n loop = loop\n\n @staticmethod\n def run_until_complete(f):\n run_until_complete(f)\n\n @staticmethod\n def write(element_id, value, append=False, exec_id=0):\n write(element_id, value, append, exec_id)\n\n\ndef _install_deprecated_globals_2022_12_1(ns):\n \"\"\"\n Install into the given namespace all the globals which have been\n deprecated since the 2022.12.1 release. Eventually they should be killed.\n \"\"\"\n\n def deprecate(name, obj, instead):\n message = f\"Direct usage of <code>{name}</code> is deprecated. \" + instead\n ns[name] = DeprecatedGlobal(name, obj, message)\n\n # function/classes defined in pyscript.py ===> pyscript.XXX\n pyscript_names = [\n \"PyItemTemplate\",\n \"PyListTemplate\",\n \"PyWidgetTheme\",\n \"add_classes\",\n \"create\",\n \"loop\",\n ]\n for name in pyscript_names:\n deprecate(\n name, globals()[name], f\"Please use <code>pyscript.{name}</code> instead.\"\n )\n\n # stdlib modules ===> import XXX\n stdlib_names = [\n \"asyncio\",\n \"base64\",\n \"io\",\n \"sys\",\n \"time\",\n \"datetime\",\n \"pyodide\",\n \"micropip\",\n ]\n for name in stdlib_names:\n obj = __import__(name)\n deprecate(name, obj, f\"Please use <code>import {name}</code> instead.\")\n\n # special case\n deprecate(\n \"dedent\", dedent, \"Please use <code>from textwrap import dedent</code> instead.\"\n )\n\n # these are names that used to leak in the globals but they are just\n # implementation details. People should not use them.\n private_names = [\n \"eval_formatter\",\n \"format_mime\",\n \"identity\",\n \"render_image\",\n \"MIME_RENDERERS\",\n \"MIME_METHODS\",\n ]\n for name in private_names:\n obj = globals()[name]\n message = (\n f\"<code>{name}</code> is deprecated. \"\n \"This is a private implementation detail of pyscript. 
\"\n \"You should not use it.\"\n )\n ns[name] = DeprecatedGlobal(name, obj, message)\n\n # these names are available as js.XXX\n for name in [\"document\", \"console\"]:\n obj = getattr(js, name)\n deprecate(name, obj, f\"Please use <code>js.{name}</code> instead.\")\n\n # PyScript is special, use a different message\n message = (\n \"The <code>PyScript</code> object is deprecated. \"\n \"Please use <code>pyscript</code> instead.\"\n )\n ns[\"PyScript\"] = DeprecatedGlobal(\"PyScript\", PyScript, message)\n", "path": "pyscriptjs/src/python/pyscript.py" } ]
diff --git a/pyscriptjs/src/python/pyscript.py b/pyscriptjs/src/python/pyscript.py index ec7dd5a8489..5c946c00745 100644 --- a/pyscriptjs/src/python/pyscript.py +++ b/pyscriptjs/src/python/pyscript.py @@ -497,7 +497,6 @@ def __init__(self, name=None): def init(self, app): self.app = app - self.app.plugins.addPythonPlugin(create_proxy(self)) def register_custom_element(self, tag): # TODO: Ideally would be better to use the logger. diff --git a/pyscriptjs/tests/integration/test_plugins.py b/pyscriptjs/tests/integration/test_plugins.py index 04ed6eeb30f..3299ca2d49b 100644 --- a/pyscriptjs/tests/integration/test_plugins.py +++ b/pyscriptjs/tests/integration/test_plugins.py @@ -164,7 +164,7 @@ def test_execution_hooks(self): # EXPECT it to log the correct logs for the events it intercepts log_lines = self.console.log.lines for method in hooks_available: - assert f"{method} called" in log_lines + assert log_lines.count(f"{method} called") == 1 # EXPECT it to NOT be called (hence not log anything) the events that happen # before it's ready, hence is not called
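The test change in the diff above replaces a membership check with an exact count; a toy illustration (independent of the PyScript test suite, using a hypothetical hook name) of why that matters when a plugin hook ends up registered twice:

```python
# If init() registers the plugin a second time, each lifecycle hook logs twice.
log_lines = ["after_setup called", "after_setup called"]

# The old assertion passes even though the hook fired twice.
print("after_setup called" in log_lines)            # True

# The new assertion only holds when the hook fired exactly once.
print(log_lines.count("after_setup called") == 1)   # False for the duplicated case
```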
mozilla__pontoon-3003
GetText check fails incorrectly on newline
https://pontoon.mozilla.org/en-GB/all-projects/all-resources/?string=286055

If you copy the source string, an extra newline is appended at the end, and that trailing newline makes the GetText checks fail.
[ { "content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.IntegerField()\n locale = forms.CharField()\n plural_form = forms.CharField()\n\n # Some file formats allow empty original strings and translations.\n # We must allow both here. Validation is handled in pontoon.checks module.\n original = forms.CharField(required=False)\n translation = forms.CharField(required=False)\n\n ignore_warnings = forms.BooleanField(required=False)\n approve = forms.BooleanField(required=False)\n force_suggestions = forms.BooleanField(required=False)\n paths = forms.MultipleChoiceField(required=False)\n machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False)\n\n def clean_paths(self):\n try:\n return self.data.getlist(\"paths[]\")\n except AttributeError:\n # If the data source is not a QueryDict, it won't have a `getlist` method.\n return self.data.get(\"paths[]\") or []\n\n def clean_entity(self):\n try:\n return Entity.objects.get(pk=self.cleaned_data[\"entity\"])\n except Entity.DoesNotExist:\n raise forms.ValidationError(f\"Entity `{self.entity}` could not be found\")\n\n def clean_locale(self):\n try:\n return Locale.objects.get(code=self.cleaned_data[\"locale\"])\n except Locale.DoesNotExist:\n raise forms.ValidationError(f\"Locale `{self.entity}` could not be found\")\n\n def clean_plural_form(self):\n if self.cleaned_data[\"plural_form\"] == \"-1\":\n return None\n return self.cleaned_data[\"plural_form\"]\n\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "path": "pontoon/translations/forms.py" } ]
[ { "content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n)\n\n\nclass CreateTranslationForm(forms.Form):\n \"\"\"\n Form for parameters to the `entities` view.\n \"\"\"\n\n entity = forms.IntegerField()\n locale = forms.CharField()\n plural_form = forms.CharField()\n\n # Some file formats allow empty original strings and translations.\n # We must allow both here. Validation is handled in pontoon.checks module.\n original = forms.CharField(required=False)\n translation = forms.CharField(required=False)\n\n ignore_warnings = forms.BooleanField(required=False)\n approve = forms.BooleanField(required=False)\n force_suggestions = forms.BooleanField(required=False)\n paths = forms.MultipleChoiceField(required=False)\n machinery_sources = SimpleArrayField(forms.CharField(max_length=30), required=False)\n\n def clean_paths(self):\n try:\n return self.data.getlist(\"paths[]\")\n except AttributeError:\n # If the data source is not a QueryDict, it won't have a `getlist` method.\n return self.data.get(\"paths[]\") or []\n\n def clean_entity(self):\n try:\n return Entity.objects.get(pk=self.cleaned_data[\"entity\"])\n except Entity.DoesNotExist:\n raise forms.ValidationError(f\"Entity `{self.entity}` could not be found\")\n\n def clean_locale(self):\n try:\n return Locale.objects.get(code=self.cleaned_data[\"locale\"])\n except Locale.DoesNotExist:\n raise forms.ValidationError(f\"Locale `{self.entity}` could not be found\")\n\n def clean_plural_form(self):\n if self.cleaned_data[\"plural_form\"] == \"-1\":\n return None\n return self.cleaned_data[\"plural_form\"]\n\n def clean_original(self):\n return self.data.get(\"original\", \"\")\n\n def clean_translation(self):\n return self.data.get(\"translation\", \"\")\n", "path": "pontoon/translations/forms.py" } ]
diff --git a/pontoon/translations/forms.py b/pontoon/translations/forms.py index bc2371e765..125c8e4122 100644 --- a/pontoon/translations/forms.py +++ b/pontoon/translations/forms.py @@ -51,5 +51,8 @@ def clean_plural_form(self): return None return self.cleaned_data["plural_form"] + def clean_original(self): + return self.data.get("original", "") + def clean_translation(self): return self.data.get("translation", "")
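The fix above returns the raw submitted value rather than the cleaned one; here is a minimal sketch (not Pontoon code, assuming only that Django is installed) of the `CharField` behaviour that motivates it — by default the field strips trailing whitespace, including the very newline the GetText check compares:

```python
from django.conf import settings

settings.configure()  # bare-bones settings so this sketch runs outside a project

from django import forms

field = forms.CharField(required=False)
print(repr(field.clean("Hello\n")))  # 'Hello'   -- strip=True (the default) drops the trailing newline
print(repr("Hello\n"))               # 'Hello\n' -- the raw form data (self.data) still has it
```

Reading `self.data.get("original", "")`, like the existing `clean_translation`, keeps the original and the translation symmetric, so a trailing newline no longer trips the check.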
conda__conda-build-862
Metadata parse failure when building apsw pypi package To generate the output below, I added some print output to `conda_build.metadata.yamlize`: ``` python @memoized def yamlize(data): print(72*'*') print(data) try: return yaml.load(data, Loader=BaseLoader) ``` Here is the build failure: ``` bash-3.2$ conda build apsw ************************************************************************ package: name: uninitialized ************************************************************************ package: name: apsw version: "3.9.2-r1" source: fn: apsw-3.9.2-r1.tar.gz url: https://pypi.python.org/packages/source/a/apsw/apsw-3.9.2-r1.tar.gz md5: 8cfdf9fea2904e3cc4c212ab41760fdd requirements: build: - python run: - python about: home: https://github.com/rogerbinns/apsw/ license: OSI Approved :: summary: 'Another Python SQLite Wrapper' Traceback (most recent call last): File "/Users/alx/anaconda/bin/conda-build", line 5, in <module> sys.exit(main()) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 208, in main args_func(args, p) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 493, in args_func args.func(args, p) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/main_build.py", line 385, in execute m = MetaData(recipe_dir) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 347, in __init__ self.parse_again(permit_undefined_jinja=True) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 358, in parse_again self.meta = parse(self._get_contents(permit_undefined_jinja)) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 146, in parse res = yamlize(data) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda/utils.py", line 118, in __call__ value = self.func(*args, **kw) File "/Users/alx/anaconda/lib/python2.7/site-packages/conda_build/metadata.py", line 101, in yamlize return yaml.load(data, Loader=BaseLoader) File "/Users/alx/anaconda/lib/python2.7/site-packages/yaml/__init__.py", line 71, in load return loader.get_single_data() File "/Users/alx/anaconda/lib/python2.7/site-packages/yaml/constructor.py", line 37, in get_single_data node = self.get_single_node() File "_yaml.pyx", line 707, in _yaml.CParser.get_single_node (ext/_yaml.c:8308) File "_yaml.pyx", line 725, in _yaml.CParser._compose_document (ext/_yaml.c:8581) File "_yaml.pyx", line 776, in _yaml.CParser._compose_node (ext/_yaml.c:9306) File "_yaml.pyx", line 890, in _yaml.CParser._compose_mapping_node (ext/_yaml.c:10838) File "_yaml.pyx", line 776, in _yaml.CParser._compose_node (ext/_yaml.c:9306) File "_yaml.pyx", line 892, in _yaml.CParser._compose_mapping_node (ext/_yaml.c:10868) File "_yaml.pyx", line 905, in _yaml.CParser._parse_next_event (ext/_yaml.c:11045) yaml.scanner.ScannerError: mapping values are not allowed in this context in "<unicode string>", line 27, column 26 bash-3.2$ ```
[ { "content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import MatchSpec\nfrom conda.cli.common import specs_from_url\n\nfrom . import exceptions\n\ntry:\n import yaml\n\n # try to import C loader\n try:\n from yaml import CBaseLoader as BaseLoader\n except ImportError:\n from yaml import BaseLoader\nexcept ImportError:\n sys.exit('Error: could not import yaml (required to read meta.yaml '\n 'files of conda recipes)')\n\nfrom conda_build.config import config\nfrom conda_build.utils import comma_join\n\ndef ns_cfg():\n # Remember to update the docs of any of this changes\n plat = cc.subdir\n py = config.CONDA_PY\n np = config.CONDA_NPY\n pl = config.CONDA_PERL\n lua = config.CONDA_LUA\n assert isinstance(py, int), py\n d = dict(\n linux = plat.startswith('linux-'),\n linux32 = bool(plat == 'linux-32'),\n linux64 = bool(plat == 'linux-64'),\n arm = plat.startswith('linux-arm'),\n osx = plat.startswith('osx-'),\n unix = plat.startswith(('linux-', 'osx-')),\n win = plat.startswith('win-'),\n win32 = bool(plat == 'win-32'),\n win64 = bool(plat == 'win-64'),\n pl = pl,\n py = py,\n lua = lua,\n luajit = bool(lua[0] == \"2\"),\n py3k = bool(30 <= py < 40),\n py2k = bool(20 <= py < 30),\n py26 = bool(py == 26),\n py27 = bool(py == 27),\n py33 = bool(py == 33),\n py34 = bool(py == 34),\n py35 = bool(py == 35),\n np = np,\n os = os,\n environ = os.environ,\n )\n for machine in cc.non_x86_linux_machines:\n d[machine] = bool(plat == 'linux-%s' % machine)\n\n d.update(os.environ)\n return d\n\n\nsel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\ndef select_lines(data, namespace):\n lines = []\n for i, line in enumerate(data.splitlines()):\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n cond = m.group(3)\n try:\n if eval(cond, namespace, {}):\n lines.append(m.group(1))\n except:\n sys.exit('''\\\nError: Invalid selector in meta.yaml line %d:\n%s\n''' % (i + 1, line))\n sys.exit(1)\n continue\n lines.append(line)\n return '\\n'.join(lines) + '\\n'\n\n\n@memoized\ndef yamlize(data):\n try:\n return yaml.load(data, Loader=BaseLoader)\n except yaml.parser.ParserError as e:\n if '{{' in data:\n try:\n import jinja2\n jinja2 # Avoid pyflakes failure: 'jinja2' imported but unused\n except ImportError:\n raise exceptions.UnableToParseMissingJinja2(original=e)\n raise exceptions.UnableToParse(original=e)\n\n\nallowed_license_families = set(\"\"\"\nAGPL\nApache\nBSD\nGPL2\nGPL3\nLGPL\nMIT\nOther\nPSF\nProprietary\nPublic-Domain\n\"\"\".split())\n\ndef ensure_valid_license_family(meta):\n try:\n license_family = meta['about']['license_family']\n except KeyError:\n return\n if license_family not in allowed_license_families:\n raise RuntimeError(exceptions.indent(\n \"about/license_family '%s' not allowed. 
Allowed families are %s.\" %\n (license_family, comma_join(sorted(allowed_license_families)))))\n\ndef ensure_valid_fields(meta):\n try:\n pin_depends = meta['build']['pin_depends']\n except KeyError:\n pin_depends = ''\n if pin_depends not in ('', 'record', 'strict'):\n raise RuntimeError(\"build/pin_depends cannot be '%s'\" % pin_depends)\n\ndef parse(data):\n data = select_lines(data, ns_cfg())\n res = yamlize(data)\n # ensure the result is a dict\n if res is None:\n res = {}\n for field in FIELDS:\n if field not in res:\n continue\n if not isinstance(res[field], dict):\n raise RuntimeError(\"The %s field should be a dict, not %s\" %\n (field, res[field].__class__.__name__))\n\n\n\n ensure_valid_fields(res)\n ensure_valid_license_family(res)\n return sanitize(res)\n\n\ntrues = {'y', 'on', 'true', 'yes'}\nfalses = {'n', 'no', 'false', 'off'}\n\ndefault_stucts = {\n 'source/patches': list,\n 'build/entry_points': list,\n 'build/script_env': list,\n 'build/features': list,\n 'build/track_features': list,\n 'requirements/build': list,\n 'requirements/run': list,\n 'requirements/conflicts': list,\n 'test/requires': list,\n 'test/files': list,\n 'test/commands': list,\n 'test/imports': list,\n 'package/version': text_type,\n 'build/string': text_type,\n 'build/pin_depends': text_type,\n 'source/svn_rev': text_type,\n 'source/git_tag': text_type,\n 'source/git_branch': text_type,\n 'source/md5': text_type,\n 'source/git_rev': text_type,\n 'source/path': text_type,\n 'source/git_url': text_type,\n 'build/osx_is_app': bool,\n 'build/preserve_egg_dir': bool,\n 'build/binary_relocation': bool,\n 'build/noarch_python': bool,\n 'build/detect_binary_files_with_prefix': bool,\n 'build/skip': bool,\n 'app/own_environment': bool\n}\n\ndef sanitize(meta):\n \"\"\"\n Sanitize the meta-data to remove aliases/handle deprecation\n\n \"\"\"\n # make a copy to avoid side-effects\n meta = meta.copy()\n sanitize_funs = [('source', _git_clean), ]\n for section, func in sanitize_funs:\n if section in meta:\n meta[section] = func(meta[section])\n return meta\n\n\ndef _git_clean(source_meta):\n \"\"\"\n Reduce the redundancy in git specification by removing git_tag and\n git_branch.\n\n If one is specified, copy to git_rev.\n\n If more than one field is used to specified, exit\n and complain.\n \"\"\"\n\n git_rev_tags_old = ('git_branch', 'git_tag')\n git_rev = 'git_rev'\n\n git_rev_tags = (git_rev,) + git_rev_tags_old\n\n has_rev_tags = tuple(bool(source_meta.get(tag, text_type())) for\n tag in git_rev_tags)\n if sum(has_rev_tags) > 1:\n msg = \"Error: mulitple git_revs:\"\n msg += ', '.join(\"{}\".format(key) for key, has in\n zip(git_rev_tags, has_rev_tags) if has)\n sys.exit(msg)\n\n # make a copy of the input so we have no side-effects\n ret_meta = source_meta.copy()\n # loop over the old versions\n for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):\n # update if needed\n if has:\n ret_meta[git_rev_tags[0]] = ret_meta[key]\n # and remove\n ret_meta.pop(key, None)\n\n return ret_meta\n\n# If you update this please update the example in\n# conda-docs/docs/source/build.rst\nFIELDS = {\n 'package': ['name', 'version'],\n 'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',\n 'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',\n 'hg_url', 'hg_tag',\n 'svn_url', 'svn_rev', 'svn_ignore_externals',\n 'patches'],\n 'build': ['number', 'string', 'entry_points', 'osx_is_app',\n 'features', 'track_features', 'preserve_egg_dir',\n 'no_link', 'binary_relocation', 'script', 'noarch_python',\n 
'has_prefix_files', 'binary_has_prefix_files', 'script_env',\n 'detect_binary_files_with_prefix', 'rpaths',\n 'always_include_files', 'skip', 'msvc_compiler',\n 'pin_depends' # pin_depends is experimental still\n ],\n 'requirements': ['build', 'run', 'conflicts'],\n 'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',\n 'own_environment'],\n 'test': ['requires', 'commands', 'files', 'imports'],\n 'about': ['home', 'dev_url', 'doc_url', 'license_url', # these are URLs\n 'license', 'summary', 'description', 'license_family', # text\n 'license_file', 'readme', # paths in source tree\n ],\n}\n\n\ndef check_bad_chrs(s, field):\n bad_chrs = '=!@#$%^&*:;\"\\'\\\\|<>?/ '\n if field in ('package/version', 'build/string'):\n bad_chrs += '-'\n for c in bad_chrs:\n if c in s:\n sys.exit(\"Error: bad character '%s' in %s: %s\" % (c, field, s))\n\n\ndef handle_config_version(ms, ver):\n \"\"\"\n 'ms' is an instance of MatchSpec, and 'ver' is the version from the\n configuration, e.g. for ms.name == 'python', ver = 26 or None,\n return a (sometimes new) MatchSpec object\n \"\"\"\n if ms.strictness == 3:\n return ms\n\n if ms.strictness == 2:\n if ms.spec.split()[1] == 'x.x':\n if ver is None:\n raise RuntimeError(\"'%s' requires external setting\" % ms.spec)\n # (no return here - proceeds below)\n else: # regular version\n return ms\n\n if ver is None or (ms.strictness == 1 and ms.name == 'numpy'):\n return MatchSpec(ms.name)\n\n ver = text_type(ver)\n if '.' not in ver:\n if ms.name == 'numpy':\n ver = '%s.%s' % (ver[0], ver[1:])\n else:\n ver = '.'.join(ver)\n return MatchSpec('%s %s*' % (ms.name, ver))\n\n\nclass MetaData(object):\n\n def __init__(self, path):\n assert isdir(path)\n self.path = path\n self.meta_path = join(path, 'meta.yaml')\n self.requirements_path = join(path, 'requirements.txt')\n if not isfile(self.meta_path):\n self.meta_path = join(path, 'conda.yaml')\n if not isfile(self.meta_path):\n sys.exit(\"Error: meta.yaml or conda.yaml not found in %s\" % path)\n\n # Start with bare-minimum contents so we can call environ.get_dict() with impunity\n # We'll immediately replace these contents in parse_again()\n self.meta = parse(\"package:\\n\"\n \" name: uninitialized\")\n\n # This is the 'first pass' parse of meta.yaml, so not all variables are defined yet\n # (e.g. GIT_FULL_HASH, etc. are undefined)\n # Therefore, undefined jinja variables are permitted here\n # In the second pass, we'll be more strict. 
See build.build()\n self.parse_again(permit_undefined_jinja=True)\n\n def parse_again(self, permit_undefined_jinja=False):\n \"\"\"Redo parsing for key-value pairs that are not initialized in the\n first pass.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n \"\"\"\n if not self.meta_path:\n return\n self.meta = parse(self._get_contents(permit_undefined_jinja))\n\n if (isfile(self.requirements_path) and\n not self.meta['requirements']['run']):\n self.meta.setdefault('requirements', {})\n run_requirements = specs_from_url(self.requirements_path)\n self.meta['requirements']['run'] = run_requirements\n\n @classmethod\n def fromdict(cls, metadata):\n \"\"\"\n Create a MetaData object from metadata dict directly.\n \"\"\"\n m = super(MetaData, cls).__new__(cls)\n m.path = ''\n m.meta_path = ''\n m.meta = sanitize(metadata)\n return m\n\n def get_section(self, section):\n return self.meta.get(section, {})\n\n def get_value(self, field, default=None, autotype=True):\n \"\"\"\n Get a value from a meta.yaml.\n :param field: Field to return\n :param default: Default object to return if field doesn't exist\n :param autotype: If True, return the default type of field if one exists.\n False will return the default object.\n :return:\n \"\"\"\n section, key = field.split('/')\n\n # get correct default\n if autotype and default is None and field in default_stucts:\n default = default_stucts[field]()\n\n value = self.get_section(section).get(key, default)\n\n # handle yaml 1.1 boolean values\n if isinstance(value, text_type):\n if value.lower() in trues:\n value = True\n elif value.lower() in falses:\n value = False\n\n return value\n\n def check_fields(self):\n for section, submeta in iteritems(self.meta):\n if section == 'extra':\n continue\n if section not in FIELDS:\n sys.exit(\"Error: unknown section: %s\" % section)\n for key in submeta:\n if key not in FIELDS[section]:\n sys.exit(\"Error: in section %r: unknown key %r\" %\n (section, key))\n\n def name(self):\n res = self.get_value('package/name')\n if not res:\n sys.exit('Error: package/name missing in: %r' % self.meta_path)\n res = text_type(res)\n if res != res.lower():\n sys.exit('Error: package/name must be lowercase, got: %r' % res)\n check_bad_chrs(res, 'package/name')\n return res\n\n def version(self):\n res = self.get_value('package/version')\n if res is None:\n sys.exit(\"Error: package/version missing in: %r\" % self.meta_path)\n check_bad_chrs(res, 'package/version')\n return res\n\n def build_number(self):\n return int(self.get_value('build/number', 0))\n\n def ms_depends(self, typ='run'):\n res = []\n name_ver_list = [\n ('python', config.CONDA_PY),\n ('numpy', config.CONDA_NPY),\n ('perl', config.CONDA_PERL),\n ('lua', config.CONDA_LUA),\n ('r', config.CONDA_R),\n ]\n for spec in self.get_value('requirements/' + typ, []):\n try:\n ms = MatchSpec(spec)\n except AssertionError:\n raise RuntimeError(\"Invalid package specification: %r\" % spec)\n if ms.name == self.name():\n raise RuntimeError(\"%s cannot depend on itself\" % self.name())\n for name, ver in name_ver_list:\n if ms.name == name:\n if self.get_value('build/noarch_python'):\n continue\n ms = handle_config_version(ms, ver)\n\n for c in '=!@#$%^&*:;\"\\'\\\\|<>?/':\n if c in ms.name:\n sys.exit(\"Error: bad character '%s' in package name \"\n \"dependency '%s'\" % (c, ms.name))\n parts = spec.split()\n if len(parts) >= 2:\n if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:\n msg 
= (\"Error: bad character '%s' in package version \"\n \"dependency '%s'\" % (parts[1], ms.name))\n if len(parts) >= 3:\n msg += \"\\nPerhaps you meant '%s %s%s'\" % (ms.name,\n parts[1], parts[2])\n sys.exit(msg)\n res.append(ms)\n return res\n\n def build_id(self):\n ret = self.get_value('build/string')\n if ret:\n check_bad_chrs(ret, 'build/string')\n return ret\n res = []\n version_pat = re.compile(r'(?:==)?(\\d+)\\.(\\d+)')\n for name, s in (('numpy', 'np'), ('python', 'py'),\n ('perl', 'pl'), ('lua', 'lua'), ('r', 'r')):\n for ms in self.ms_depends():\n if ms.name == name:\n try:\n v = ms.spec.split()[1]\n except IndexError:\n if name not in ['numpy']:\n res.append(s)\n break\n if any(i in v for i in ',|>!<'):\n break\n if name not in ['perl', 'r', 'lua']:\n match = version_pat.match(v)\n if match:\n res.append(s + match.group(1) + match.group(2))\n else:\n res.append(s + v.strip('*'))\n break\n\n features = self.get_value('build/features', [])\n if res:\n res.append('_')\n if features:\n res.extend(('_'.join(features), '_'))\n res.append('%d' % self.build_number())\n return ''.join(res)\n\n def dist(self):\n return '%s-%s-%s' % (self.name(), self.version(), self.build_id())\n\n def pkg_fn(self):\n return \"%s.tar.bz2\" % self.dist()\n\n def is_app(self):\n return bool(self.get_value('app/entry'))\n\n def app_meta(self):\n d = {'type': 'app'}\n if self.get_value('app/icon'):\n d['icon'] = '%s.png' % md5_file(join(\n self.path, self.get_value('app/icon')))\n\n for field, key in [('app/entry', 'app_entry'),\n ('app/type', 'app_type'),\n ('app/cli_opts', 'app_cli_opts'),\n ('app/summary', 'summary'),\n ('app/own_environment', 'app_own_environment')]:\n value = self.get_value(field)\n if value:\n d[key] = value\n return d\n\n def info_index(self):\n d = dict(\n name = self.name(),\n version = self.version(),\n build = self.build_id(),\n build_number = self.build_number(),\n platform = cc.platform,\n arch = cc.arch_name,\n subdir = cc.subdir,\n depends = sorted(' '.join(ms.spec.split())\n for ms in self.ms_depends()),\n )\n for key in ('license', 'license_family'):\n value = self.get_value('about/' + key)\n if value:\n d[key] = value\n\n if self.get_value('build/features'):\n d['features'] = ' '.join(self.get_value('build/features'))\n if self.get_value('build/track_features'):\n d['track_features'] = ' '.join(self.get_value('build/track_features'))\n if self.get_value('build/noarch_python'):\n d['platform'] = d['arch'] = None\n d['subdir'] = 'noarch'\n if self.is_app():\n d.update(self.app_meta())\n return d\n\n def has_prefix_files(self):\n ret = self.get_value('build/has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/has_prefix_files paths must use / as the path delimiter on Windows\")\n return ret\n\n def always_include_files(self):\n return self.get_value('build/always_include_files', [])\n\n def binary_has_prefix_files(self):\n ret = self.get_value('build/binary_has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/binary_has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/binary_has_prefix_files paths must use / as the path delimiter on Windows\")\n return ret\n\n def skip(self):\n return self.get_value('build/skip', False)\n\n def _get_contents(self, permit_undefined_jinja):\n '''\n Get the 
contents of our [meta.yaml|conda.yaml] file.\n If jinja is installed, then the template.render function is called\n before standard conda macro processors.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n '''\n try:\n import jinja2\n except ImportError:\n print(\"There was an error importing jinja2.\", file=sys.stderr)\n print(\"Please run `conda install jinja2` to enable jinja template support\", file=sys.stderr)\n with open(self.meta_path) as fd:\n return fd.read()\n\n from conda_build.jinja_context import context_processor\n\n path, filename = os.path.split(self.meta_path)\n loaders = [# search relative to '<conda_root>/Lib/site-packages/conda_build/templates'\n jinja2.PackageLoader('conda_build'),\n # search relative to RECIPE_DIR\n jinja2.FileSystemLoader(path)\n ]\n\n # search relative to current conda environment directory\n conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment\n if conda_env_path and os.path.isdir(conda_env_path):\n conda_env_path = os.path.abspath(conda_env_path)\n conda_env_path = conda_env_path.replace('\\\\', '/') # need unix-style path\n env_loader = jinja2.FileSystemLoader(conda_env_path)\n loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))\n\n undefined_type = jinja2.StrictUndefined\n if permit_undefined_jinja:\n class UndefinedNeverFail(jinja2.Undefined):\n \"\"\"\n A class for Undefined jinja variables.\n This is even less strict than the default jinja2.Undefined class,\n because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and {{ MY_UNDEFINED_VAR|int }}.\n This can mask lots of errors in jinja templates, so it should only be used for a first-pass\n parse, when you plan on running a 'strict' second pass later.\n \"\"\"\n __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \\\n __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \\\n __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \\\n __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \\\n __complex__ = __pow__ = __rpow__ = \\\n lambda *args, **kwargs: UndefinedNeverFail()\n\n __str__ = __repr__ = \\\n lambda *args, **kwargs: u''\n\n __int__ = lambda _: 0\n __float__ = lambda _: 0.0\n\n def __getattr__(self, k):\n try:\n return object.__getattr__(self, k)\n except AttributeError:\n return UndefinedNeverFail()\n\n def __setattr__(self, k, v):\n pass\n\n undefined_type = UndefinedNeverFail\n\n env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders), undefined=undefined_type)\n env.globals.update(ns_cfg())\n env.globals.update(context_processor(self, path))\n\n try:\n template = env.get_or_select_template(filename)\n return template.render(environment=env)\n except jinja2.TemplateError as ex:\n sys.exit(\"Error: Failed to render jinja template in {}:\\n{}\".format(self.meta_path, ex.message))\n\n def __unicode__(self):\n '''\n String representation of the MetaData.\n '''\n return text_type(self.__dict__)\n\n def __str__(self):\n if PY3:\n return self.__unicode__()\n else:\n return self.__unicode__().encode('utf-8')\n\n def __repr__(self):\n '''\n String representation of the MetaData.\n '''\n return self.__str__()\n\n\nif __name__ == '__main__':\n from pprint import pprint\n from os.path import expanduser\n\n m = MetaData(expanduser('~/conda-recipes/pycosat'))\n pprint(m.info_index())\n", "path": "conda_build/metadata.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import MatchSpec\nfrom conda.cli.common import specs_from_url\n\nfrom . import exceptions\n\ntry:\n import yaml\n\n # try to import C loader\n try:\n from yaml import CBaseLoader as BaseLoader\n except ImportError:\n from yaml import BaseLoader\nexcept ImportError:\n sys.exit('Error: could not import yaml (required to read meta.yaml '\n 'files of conda recipes)')\n\nfrom conda_build.config import config\nfrom conda_build.utils import comma_join\n\ndef ns_cfg():\n # Remember to update the docs of any of this changes\n plat = cc.subdir\n py = config.CONDA_PY\n np = config.CONDA_NPY\n pl = config.CONDA_PERL\n lua = config.CONDA_LUA\n assert isinstance(py, int), py\n d = dict(\n linux = plat.startswith('linux-'),\n linux32 = bool(plat == 'linux-32'),\n linux64 = bool(plat == 'linux-64'),\n arm = plat.startswith('linux-arm'),\n osx = plat.startswith('osx-'),\n unix = plat.startswith(('linux-', 'osx-')),\n win = plat.startswith('win-'),\n win32 = bool(plat == 'win-32'),\n win64 = bool(plat == 'win-64'),\n pl = pl,\n py = py,\n lua = lua,\n luajit = bool(lua[0] == \"2\"),\n py3k = bool(30 <= py < 40),\n py2k = bool(20 <= py < 30),\n py26 = bool(py == 26),\n py27 = bool(py == 27),\n py33 = bool(py == 33),\n py34 = bool(py == 34),\n py35 = bool(py == 35),\n np = np,\n os = os,\n environ = os.environ,\n )\n for machine in cc.non_x86_linux_machines:\n d[machine] = bool(plat == 'linux-%s' % machine)\n\n d.update(os.environ)\n return d\n\n\nsel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\ndef select_lines(data, namespace):\n lines = []\n for i, line in enumerate(data.splitlines()):\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n cond = m.group(3)\n try:\n if eval(cond, namespace, {}):\n lines.append(m.group(1))\n except:\n sys.exit('''\\\nError: Invalid selector in meta.yaml line %d:\n%s\n''' % (i + 1, line))\n sys.exit(1)\n continue\n lines.append(line)\n return '\\n'.join(lines) + '\\n'\n\n\n@memoized\ndef yamlize(data):\n try:\n return yaml.load(data, Loader=BaseLoader)\n except yaml.error.YAMLError as e:\n if '{{' in data:\n try:\n import jinja2\n jinja2 # Avoid pyflakes failure: 'jinja2' imported but unused\n except ImportError:\n raise exceptions.UnableToParseMissingJinja2(original=e)\n raise exceptions.UnableToParse(original=e)\n\n\nallowed_license_families = set(\"\"\"\nAGPL\nApache\nBSD\nGPL2\nGPL3\nLGPL\nMIT\nOther\nPSF\nProprietary\nPublic-Domain\n\"\"\".split())\n\ndef ensure_valid_license_family(meta):\n try:\n license_family = meta['about']['license_family']\n except KeyError:\n return\n if license_family not in allowed_license_families:\n raise RuntimeError(exceptions.indent(\n \"about/license_family '%s' not allowed. 
Allowed families are %s.\" %\n (license_family, comma_join(sorted(allowed_license_families)))))\n\ndef ensure_valid_fields(meta):\n try:\n pin_depends = meta['build']['pin_depends']\n except KeyError:\n pin_depends = ''\n if pin_depends not in ('', 'record', 'strict'):\n raise RuntimeError(\"build/pin_depends cannot be '%s'\" % pin_depends)\n\ndef parse(data):\n data = select_lines(data, ns_cfg())\n res = yamlize(data)\n # ensure the result is a dict\n if res is None:\n res = {}\n for field in FIELDS:\n if field not in res:\n continue\n if not isinstance(res[field], dict):\n raise RuntimeError(\"The %s field should be a dict, not %s\" %\n (field, res[field].__class__.__name__))\n\n\n\n ensure_valid_fields(res)\n ensure_valid_license_family(res)\n return sanitize(res)\n\n\ntrues = {'y', 'on', 'true', 'yes'}\nfalses = {'n', 'no', 'false', 'off'}\n\ndefault_stucts = {\n 'source/patches': list,\n 'build/entry_points': list,\n 'build/script_env': list,\n 'build/features': list,\n 'build/track_features': list,\n 'requirements/build': list,\n 'requirements/run': list,\n 'requirements/conflicts': list,\n 'test/requires': list,\n 'test/files': list,\n 'test/commands': list,\n 'test/imports': list,\n 'package/version': text_type,\n 'build/string': text_type,\n 'build/pin_depends': text_type,\n 'source/svn_rev': text_type,\n 'source/git_tag': text_type,\n 'source/git_branch': text_type,\n 'source/md5': text_type,\n 'source/git_rev': text_type,\n 'source/path': text_type,\n 'source/git_url': text_type,\n 'build/osx_is_app': bool,\n 'build/preserve_egg_dir': bool,\n 'build/binary_relocation': bool,\n 'build/noarch_python': bool,\n 'build/detect_binary_files_with_prefix': bool,\n 'build/skip': bool,\n 'app/own_environment': bool\n}\n\ndef sanitize(meta):\n \"\"\"\n Sanitize the meta-data to remove aliases/handle deprecation\n\n \"\"\"\n # make a copy to avoid side-effects\n meta = meta.copy()\n sanitize_funs = [('source', _git_clean), ]\n for section, func in sanitize_funs:\n if section in meta:\n meta[section] = func(meta[section])\n return meta\n\n\ndef _git_clean(source_meta):\n \"\"\"\n Reduce the redundancy in git specification by removing git_tag and\n git_branch.\n\n If one is specified, copy to git_rev.\n\n If more than one field is used to specified, exit\n and complain.\n \"\"\"\n\n git_rev_tags_old = ('git_branch', 'git_tag')\n git_rev = 'git_rev'\n\n git_rev_tags = (git_rev,) + git_rev_tags_old\n\n has_rev_tags = tuple(bool(source_meta.get(tag, text_type())) for\n tag in git_rev_tags)\n if sum(has_rev_tags) > 1:\n msg = \"Error: mulitple git_revs:\"\n msg += ', '.join(\"{}\".format(key) for key, has in\n zip(git_rev_tags, has_rev_tags) if has)\n sys.exit(msg)\n\n # make a copy of the input so we have no side-effects\n ret_meta = source_meta.copy()\n # loop over the old versions\n for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):\n # update if needed\n if has:\n ret_meta[git_rev_tags[0]] = ret_meta[key]\n # and remove\n ret_meta.pop(key, None)\n\n return ret_meta\n\n# If you update this please update the example in\n# conda-docs/docs/source/build.rst\nFIELDS = {\n 'package': ['name', 'version'],\n 'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',\n 'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',\n 'hg_url', 'hg_tag',\n 'svn_url', 'svn_rev', 'svn_ignore_externals',\n 'patches'],\n 'build': ['number', 'string', 'entry_points', 'osx_is_app',\n 'features', 'track_features', 'preserve_egg_dir',\n 'no_link', 'binary_relocation', 'script', 'noarch_python',\n 
'has_prefix_files', 'binary_has_prefix_files', 'script_env',\n 'detect_binary_files_with_prefix', 'rpaths',\n 'always_include_files', 'skip', 'msvc_compiler',\n 'pin_depends' # pin_depends is experimental still\n ],\n 'requirements': ['build', 'run', 'conflicts'],\n 'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',\n 'own_environment'],\n 'test': ['requires', 'commands', 'files', 'imports'],\n 'about': ['home', 'dev_url', 'doc_url', 'license_url', # these are URLs\n 'license', 'summary', 'description', 'license_family', # text\n 'license_file', 'readme', # paths in source tree\n ],\n}\n\n\ndef check_bad_chrs(s, field):\n bad_chrs = '=!@#$%^&*:;\"\\'\\\\|<>?/ '\n if field in ('package/version', 'build/string'):\n bad_chrs += '-'\n for c in bad_chrs:\n if c in s:\n sys.exit(\"Error: bad character '%s' in %s: %s\" % (c, field, s))\n\n\ndef handle_config_version(ms, ver):\n \"\"\"\n 'ms' is an instance of MatchSpec, and 'ver' is the version from the\n configuration, e.g. for ms.name == 'python', ver = 26 or None,\n return a (sometimes new) MatchSpec object\n \"\"\"\n if ms.strictness == 3:\n return ms\n\n if ms.strictness == 2:\n if ms.spec.split()[1] == 'x.x':\n if ver is None:\n raise RuntimeError(\"'%s' requires external setting\" % ms.spec)\n # (no return here - proceeds below)\n else: # regular version\n return ms\n\n if ver is None or (ms.strictness == 1 and ms.name == 'numpy'):\n return MatchSpec(ms.name)\n\n ver = text_type(ver)\n if '.' not in ver:\n if ms.name == 'numpy':\n ver = '%s.%s' % (ver[0], ver[1:])\n else:\n ver = '.'.join(ver)\n return MatchSpec('%s %s*' % (ms.name, ver))\n\n\nclass MetaData(object):\n\n def __init__(self, path):\n assert isdir(path)\n self.path = path\n self.meta_path = join(path, 'meta.yaml')\n self.requirements_path = join(path, 'requirements.txt')\n if not isfile(self.meta_path):\n self.meta_path = join(path, 'conda.yaml')\n if not isfile(self.meta_path):\n sys.exit(\"Error: meta.yaml or conda.yaml not found in %s\" % path)\n\n # Start with bare-minimum contents so we can call environ.get_dict() with impunity\n # We'll immediately replace these contents in parse_again()\n self.meta = parse(\"package:\\n\"\n \" name: uninitialized\")\n\n # This is the 'first pass' parse of meta.yaml, so not all variables are defined yet\n # (e.g. GIT_FULL_HASH, etc. are undefined)\n # Therefore, undefined jinja variables are permitted here\n # In the second pass, we'll be more strict. 
See build.build()\n self.parse_again(permit_undefined_jinja=True)\n\n def parse_again(self, permit_undefined_jinja=False):\n \"\"\"Redo parsing for key-value pairs that are not initialized in the\n first pass.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n \"\"\"\n if not self.meta_path:\n return\n self.meta = parse(self._get_contents(permit_undefined_jinja))\n\n if (isfile(self.requirements_path) and\n not self.meta['requirements']['run']):\n self.meta.setdefault('requirements', {})\n run_requirements = specs_from_url(self.requirements_path)\n self.meta['requirements']['run'] = run_requirements\n\n @classmethod\n def fromdict(cls, metadata):\n \"\"\"\n Create a MetaData object from metadata dict directly.\n \"\"\"\n m = super(MetaData, cls).__new__(cls)\n m.path = ''\n m.meta_path = ''\n m.meta = sanitize(metadata)\n return m\n\n def get_section(self, section):\n return self.meta.get(section, {})\n\n def get_value(self, field, default=None, autotype=True):\n \"\"\"\n Get a value from a meta.yaml.\n :param field: Field to return\n :param default: Default object to return if field doesn't exist\n :param autotype: If True, return the default type of field if one exists.\n False will return the default object.\n :return:\n \"\"\"\n section, key = field.split('/')\n\n # get correct default\n if autotype and default is None and field in default_stucts:\n default = default_stucts[field]()\n\n value = self.get_section(section).get(key, default)\n\n # handle yaml 1.1 boolean values\n if isinstance(value, text_type):\n if value.lower() in trues:\n value = True\n elif value.lower() in falses:\n value = False\n\n return value\n\n def check_fields(self):\n for section, submeta in iteritems(self.meta):\n if section == 'extra':\n continue\n if section not in FIELDS:\n sys.exit(\"Error: unknown section: %s\" % section)\n for key in submeta:\n if key not in FIELDS[section]:\n sys.exit(\"Error: in section %r: unknown key %r\" %\n (section, key))\n\n def name(self):\n res = self.get_value('package/name')\n if not res:\n sys.exit('Error: package/name missing in: %r' % self.meta_path)\n res = text_type(res)\n if res != res.lower():\n sys.exit('Error: package/name must be lowercase, got: %r' % res)\n check_bad_chrs(res, 'package/name')\n return res\n\n def version(self):\n res = self.get_value('package/version')\n if res is None:\n sys.exit(\"Error: package/version missing in: %r\" % self.meta_path)\n check_bad_chrs(res, 'package/version')\n return res\n\n def build_number(self):\n return int(self.get_value('build/number', 0))\n\n def ms_depends(self, typ='run'):\n res = []\n name_ver_list = [\n ('python', config.CONDA_PY),\n ('numpy', config.CONDA_NPY),\n ('perl', config.CONDA_PERL),\n ('lua', config.CONDA_LUA),\n ('r', config.CONDA_R),\n ]\n for spec in self.get_value('requirements/' + typ, []):\n try:\n ms = MatchSpec(spec)\n except AssertionError:\n raise RuntimeError(\"Invalid package specification: %r\" % spec)\n if ms.name == self.name():\n raise RuntimeError(\"%s cannot depend on itself\" % self.name())\n for name, ver in name_ver_list:\n if ms.name == name:\n if self.get_value('build/noarch_python'):\n continue\n ms = handle_config_version(ms, ver)\n\n for c in '=!@#$%^&*:;\"\\'\\\\|<>?/':\n if c in ms.name:\n sys.exit(\"Error: bad character '%s' in package name \"\n \"dependency '%s'\" % (c, ms.name))\n parts = spec.split()\n if len(parts) >= 2:\n if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:\n msg 
= (\"Error: bad character '%s' in package version \"\n \"dependency '%s'\" % (parts[1], ms.name))\n if len(parts) >= 3:\n msg += \"\\nPerhaps you meant '%s %s%s'\" % (ms.name,\n parts[1], parts[2])\n sys.exit(msg)\n res.append(ms)\n return res\n\n def build_id(self):\n ret = self.get_value('build/string')\n if ret:\n check_bad_chrs(ret, 'build/string')\n return ret\n res = []\n version_pat = re.compile(r'(?:==)?(\\d+)\\.(\\d+)')\n for name, s in (('numpy', 'np'), ('python', 'py'),\n ('perl', 'pl'), ('lua', 'lua'), ('r', 'r')):\n for ms in self.ms_depends():\n if ms.name == name:\n try:\n v = ms.spec.split()[1]\n except IndexError:\n if name not in ['numpy']:\n res.append(s)\n break\n if any(i in v for i in ',|>!<'):\n break\n if name not in ['perl', 'r', 'lua']:\n match = version_pat.match(v)\n if match:\n res.append(s + match.group(1) + match.group(2))\n else:\n res.append(s + v.strip('*'))\n break\n\n features = self.get_value('build/features', [])\n if res:\n res.append('_')\n if features:\n res.extend(('_'.join(features), '_'))\n res.append('%d' % self.build_number())\n return ''.join(res)\n\n def dist(self):\n return '%s-%s-%s' % (self.name(), self.version(), self.build_id())\n\n def pkg_fn(self):\n return \"%s.tar.bz2\" % self.dist()\n\n def is_app(self):\n return bool(self.get_value('app/entry'))\n\n def app_meta(self):\n d = {'type': 'app'}\n if self.get_value('app/icon'):\n d['icon'] = '%s.png' % md5_file(join(\n self.path, self.get_value('app/icon')))\n\n for field, key in [('app/entry', 'app_entry'),\n ('app/type', 'app_type'),\n ('app/cli_opts', 'app_cli_opts'),\n ('app/summary', 'summary'),\n ('app/own_environment', 'app_own_environment')]:\n value = self.get_value(field)\n if value:\n d[key] = value\n return d\n\n def info_index(self):\n d = dict(\n name = self.name(),\n version = self.version(),\n build = self.build_id(),\n build_number = self.build_number(),\n platform = cc.platform,\n arch = cc.arch_name,\n subdir = cc.subdir,\n depends = sorted(' '.join(ms.spec.split())\n for ms in self.ms_depends()),\n )\n for key in ('license', 'license_family'):\n value = self.get_value('about/' + key)\n if value:\n d[key] = value\n\n if self.get_value('build/features'):\n d['features'] = ' '.join(self.get_value('build/features'))\n if self.get_value('build/track_features'):\n d['track_features'] = ' '.join(self.get_value('build/track_features'))\n if self.get_value('build/noarch_python'):\n d['platform'] = d['arch'] = None\n d['subdir'] = 'noarch'\n if self.is_app():\n d.update(self.app_meta())\n return d\n\n def has_prefix_files(self):\n ret = self.get_value('build/has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/has_prefix_files paths must use / as the path delimiter on Windows\")\n return ret\n\n def always_include_files(self):\n return self.get_value('build/always_include_files', [])\n\n def binary_has_prefix_files(self):\n ret = self.get_value('build/binary_has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/binary_has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/binary_has_prefix_files paths must use / as the path delimiter on Windows\")\n return ret\n\n def skip(self):\n return self.get_value('build/skip', False)\n\n def _get_contents(self, permit_undefined_jinja):\n '''\n Get the 
contents of our [meta.yaml|conda.yaml] file.\n If jinja is installed, then the template.render function is called\n before standard conda macro processors.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n '''\n try:\n import jinja2\n except ImportError:\n print(\"There was an error importing jinja2.\", file=sys.stderr)\n print(\"Please run `conda install jinja2` to enable jinja template support\", file=sys.stderr)\n with open(self.meta_path) as fd:\n return fd.read()\n\n from conda_build.jinja_context import context_processor\n\n path, filename = os.path.split(self.meta_path)\n loaders = [# search relative to '<conda_root>/Lib/site-packages/conda_build/templates'\n jinja2.PackageLoader('conda_build'),\n # search relative to RECIPE_DIR\n jinja2.FileSystemLoader(path)\n ]\n\n # search relative to current conda environment directory\n conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment\n if conda_env_path and os.path.isdir(conda_env_path):\n conda_env_path = os.path.abspath(conda_env_path)\n conda_env_path = conda_env_path.replace('\\\\', '/') # need unix-style path\n env_loader = jinja2.FileSystemLoader(conda_env_path)\n loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))\n\n undefined_type = jinja2.StrictUndefined\n if permit_undefined_jinja:\n class UndefinedNeverFail(jinja2.Undefined):\n \"\"\"\n A class for Undefined jinja variables.\n This is even less strict than the default jinja2.Undefined class,\n because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and {{ MY_UNDEFINED_VAR|int }}.\n This can mask lots of errors in jinja templates, so it should only be used for a first-pass\n parse, when you plan on running a 'strict' second pass later.\n \"\"\"\n __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \\\n __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \\\n __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \\\n __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \\\n __complex__ = __pow__ = __rpow__ = \\\n lambda *args, **kwargs: UndefinedNeverFail()\n\n __str__ = __repr__ = \\\n lambda *args, **kwargs: u''\n\n __int__ = lambda _: 0\n __float__ = lambda _: 0.0\n\n def __getattr__(self, k):\n try:\n return object.__getattr__(self, k)\n except AttributeError:\n return UndefinedNeverFail()\n\n def __setattr__(self, k, v):\n pass\n\n undefined_type = UndefinedNeverFail\n\n env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders), undefined=undefined_type)\n env.globals.update(ns_cfg())\n env.globals.update(context_processor(self, path))\n\n try:\n template = env.get_or_select_template(filename)\n return template.render(environment=env)\n except jinja2.TemplateError as ex:\n sys.exit(\"Error: Failed to render jinja template in {}:\\n{}\".format(self.meta_path, ex.message))\n\n def __unicode__(self):\n '''\n String representation of the MetaData.\n '''\n return text_type(self.__dict__)\n\n def __str__(self):\n if PY3:\n return self.__unicode__()\n else:\n return self.__unicode__().encode('utf-8')\n\n def __repr__(self):\n '''\n String representation of the MetaData.\n '''\n return self.__str__()\n\n\nif __name__ == '__main__':\n from pprint import pprint\n from os.path import expanduser\n\n m = MetaData(expanduser('~/conda-recipes/pycosat'))\n pprint(m.info_index())\n", "path": "conda_build/metadata.py" } ]
diff --git a/conda_build/metadata.py b/conda_build/metadata.py index cd8329a021..d737e6d6c4 100644 --- a/conda_build/metadata.py +++ b/conda_build/metadata.py @@ -97,7 +97,7 @@ def select_lines(data, namespace): def yamlize(data): try: return yaml.load(data, Loader=BaseLoader) - except yaml.parser.ParserError as e: + except yaml.error.YAMLError as e: if '{{' in data: try: import jinja2
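The one-line change above widens the except clause because PyYAML's exception classes are siblings: the "mapping values are not allowed" failure raised for the apsw recipe is a `ScannerError`, which `yaml.parser.ParserError` never catches, while the common base class `yaml.error.YAMLError` does. A minimal sketch (independent of conda-build, assuming only PyYAML is installed):

```python
import yaml

snippet = "license: OSI Approved ::\n"   # the kind of value that broke the apsw recipe

try:
    yaml.load(snippet, Loader=yaml.BaseLoader)
except yaml.parser.ParserError:
    print("ParserError")                 # never reached for this input
except yaml.error.YAMLError as exc:      # base class also covers ScannerError
    print(type(exc).__name__)            # -> ScannerError
```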
Nitrate__Nitrate-337
Upgrade django-tinymce to 2.7.0 As per subject.
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.6.0',\n 'django-uuslug == 1.1.8',\n 'django >= 1.10,<2.0',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'six',\n 'xmltodict',\n]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'PyMySQL == 0.7.11',\n 'beautifulsoup4 >= 4.1.1',\n 'celery == 4.1.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'django >= 1.10,<2.0',\n 'html2text',\n 'kobo == 0.7.0',\n 'odfpy >= 0.9.6',\n 'six',\n 'xmltodict',\n]\n\nextras_require = {\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ]\n}\n\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.10',\n 'Framework :: Django :: 1.11',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/requirements.txt b/requirements.txt index 631796a5..ae527117 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,63 +1,67 @@ -# Frozen on May 26, 2018 -alabaster==0.7.10 -amqp==2.2.2 -atomicwrites==1.1.5 -attrs==18.1.0 -Babel==2.5.3 -beautifulsoup4==4.6.0 -billiard==3.5.0.3 +alabaster==0.7.12 +amqp==2.3.2 +atomicwrites==1.2.1 +attrs==18.2.0 +Babel==2.6.0 +beautifulsoup4==4.6.3 +billiard==3.5.0.4 celery==4.1.0 -certifi==2018.4.16 +certifi==2018.10.15 chardet==3.0.4 -coverage==4.5.1 -Django==1.11.13 +coverage==4.5.2 +Django==1.11.16 django-contrib-comments==1.8.0 django-debug-toolbar==1.7 -django-extensions==2.0.7 -django-tinymce==2.6.0 +django-extensions==2.1.3 +django-tinymce==2.7.0 django-uuslug==1.1.8 docutils==0.14 factory-boy==2.11.1 -Faker==0.8.15 -flake8==3.5.0 +Faker==1.0.0 +filelock==3.0.10 +flake8==3.6.0 +future-breakpoint==1.0.1 html2text==2018.1.9 -idna==2.6 -imagesize==1.0.0 +idna==2.7 +imagesize==1.1.0 Jinja2==2.10 +kerberos==1.2.5 kobo==0.7.0 -kombu==4.2.0 -MarkupSafe==1.0 +kombu==4.2.1 +MarkupSafe==1.1.0 mccabe==0.6.1 mock==2.0.0 -more-itertools==4.2.0 +more-itertools==4.3.0 +nitrate==4.1 odfpy==1.3.6 -packaging==17.1 -pbr==4.0.3 -pluggy==0.6.0 -py==1.5.3 -pycodestyle==2.3.1 -pyflakes==1.6.0 +packaging==18.0 +pbr==5.1.1 +pluggy==0.8.0 +py==1.7.0 +pycodestyle==2.4.0 +pyflakes==2.0.0 Pygments==2.2.0 -pygraphviz==1.3.1 +pygraphviz==1.5 PyMySQL==0.7.11 -pyparsing==2.2.0 -pytest==3.6.0 -pytest-cov==2.5.1 -pytest-django==3.2.1 -python-dateutil==2.7.3 -python-slugify==1.2.5 -pytz==2018.4 -requests==2.18.4 +pyparsing==2.3.0 +pytest==4.0.0 +pytest-cov==2.6.0 +pytest-django==3.4.4 +python-dateutil==2.7.5 +python-slugify==1.2.6 +pytz==2018.7 +requests==2.20.1 six==1.11.0 snowballstemmer==1.2.1 -Sphinx==1.7.4 -sphinx-rtd-theme==0.3.1 -sphinxcontrib-websupport==1.0.1 +Sphinx==1.8.2 +sphinx-rtd-theme==0.4.2 +sphinxcontrib-websupport==1.1.0 sqlparse==0.2.4 text-unidecode==1.2 -tox==3.0.0 +toml==0.10.0 +tox==3.5.3 Unidecode==1.0.22 -urllib3==1.22 +urllib3==1.24.1 vine==1.1.4 -virtualenv==16.0.0 +virtualenv==16.1.0 xmltodict==0.11.0 diff --git a/setup.py b/setup.py index a3d1ed2c..59252f48 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ def get_long_description(): 'beautifulsoup4 >= 4.1.1', 'celery == 4.1.0', 'django-contrib-comments == 1.8.0', - 'django-tinymce == 2.6.0', + 'django-tinymce == 2.7.0', 'django-uuslug == 1.1.8', 'django >= 1.10,<2.0', 'html2text',
qtile__qtile-1604
libqtile utils.py:safe_import():L192 Unmet dependencies for optional Widget: '.widget.launchbar.LaunchBar', No module named 'xdg.IconTheme' It seems there's confusion about the `xdg` dependency that is used. The code expects [PyXDG](https://freedesktop.org/wiki/Software/pyxdg/) while the actual installed version is [xdg](https://pypi.org/project/xdg/). The latter does not have an `IconTheme` submodule, explaining the message. The distribution name for `pyxdg` is `pyxdg` (not `xdg`). https://github.com/qtile/qtile/blob/0d8b6e5de1cacb9827c4b30ce7ed8da4bb686f26/libqtile/widget/launchbar.py#L49
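The distinction matters at import time; a minimal sketch (not qtile code) of how the two distributions differ — only pyxdg provides the submodule the widget needs:

```python
# With the pyxdg distribution installed this import succeeds; with the
# unrelated "xdg" distribution it fails, which is exactly what safe_import
# reports as the unmet dependency for the LaunchBar widget.
try:
    from xdg.IconTheme import getIconPath  # provided by pyxdg
except ImportError:
    getIconPath = None
    print("install 'pyxdg' (not 'xdg') to get xdg.IconTheme")
```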
[ { "content": "# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2014 dequis\n# Copyright (c) 2014-2015 Joseph Razik\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2015 reus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nThis module define a widget that displays icons to launch softwares or commands\nwhen clicked -- a launchbar.\nOnly png icon files are displayed, not xpm because cairo doesn't support\nloading of xpm file.\nThe order of displaying (from left to right) is in the order of the list.\n\nIf no icon was found for the name provided and if default_icon is set to None\nthen the name is printed instead. If default_icon is defined then this icon is\ndisplayed instead.\n\nTo execute a software:\n - ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')\nTo execute a python command in qtile, begin with by 'qshell:'\n - ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')\n\n\n\"\"\"\nfrom libqtile import bar\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nimport os.path\nimport cairocffi\nfrom xdg.IconTheme import getIconPath\n\n\nclass LaunchBar(base._Widget):\n \"\"\"A widget that display icons to launch the associated command\n\n Parameters\n ==========\n progs :\n a list of tuples ``(software_name, command_to_execute, comment)``, for\n example::\n\n ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')\n ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('padding', 2, 'Padding between icons'),\n ('default_icon', '/usr/share/icons/oxygen/256x256/mimetypes/'\n 'application-x-executable.png', 'Default icon not found'),\n ]\n\n def __init__(self, progs=None, width=bar.CALCULATED, **config):\n base._Widget.__init__(self, width, **config)\n if progs is None:\n progs = []\n self.add_defaults(LaunchBar.defaults)\n self.surfaces = {}\n self.icons_files = {}\n self.icons_widths = {}\n self.icons_offsets = {}\n # For now, ignore the comments but may be one day it will be useful\n self.progs = dict(enumerate([{'name': prog[0], 'cmd': prog[1],\n 'comment': prog[2] if len(prog) > 2 else\n None} for prog in progs]))\n self.progs_name = set([prog['name'] for prog in self.progs.values()])\n self.length_type = bar.STATIC\n self.length = 0\n\n def _configure(self, qtile, pbar):\n base._Widget._configure(self, qtile, pbar)\n self.lookup_icons()\n self.setup_images()\n self.length = 
self.calculate_length()\n\n def setup_images(self):\n \"\"\" Create image structures for each icon files. \"\"\"\n for img_name, iconfile in self.icons_files.items():\n if iconfile is None:\n logger.warning(\n 'No icon found for application \"%s\" (%s) switch to text mode',\n img_name, iconfile)\n # if no icon is found and no default icon was set, we just\n # print the name, based on a textbox.\n textbox = base._TextBox()\n textbox._configure(self.qtile, self.bar)\n textbox.layout = self.drawer.textlayout(\n textbox.text,\n textbox.foreground,\n textbox.font,\n textbox.fontsize,\n textbox.fontshadow,\n markup=textbox.markup,\n )\n # the name will be displayed\n textbox.text = img_name\n textbox.calculate_length()\n self.icons_widths[img_name] = textbox.width\n self.surfaces[img_name] = textbox\n continue\n else:\n try:\n img = cairocffi.ImageSurface.create_from_png(iconfile)\n except cairocffi.Error:\n logger.exception('Error loading icon for application \"%s\" (%s)', img_name, iconfile)\n return\n\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / (self.bar.height - 4)\n width = int(input_width / sp)\n\n imgpat = cairocffi.SurfacePattern(img)\n scaler = cairocffi.Matrix()\n scaler.scale(sp, sp)\n scaler.translate(self.padding * -1, -2)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairocffi.FILTER_BEST)\n self.surfaces[img_name] = imgpat\n self.icons_widths[img_name] = width\n\n def _lookup_icon(self, name):\n \"\"\" Search for the icon corresponding to one command. \"\"\"\n self.icons_files[name] = None\n # if the software_name is directly an absolute path icon file\n if os.path.isabs(name):\n # name start with '/' thus it's an absolute path\n root, ext = os.path.splitext(name)\n if ext == '.png':\n self.icons_files[name] = name if os.path.isfile(name) else None\n else:\n # try to add the extension\n self.icons_files[name] = name + '.png' if os.path.isfile(name + '.png') else None\n else:\n self.icons_files[name] = getIconPath(name)\n # no search method found an icon, so default icon\n if self.icons_files[name] is None:\n self.icons_files[name] = self.default_icon\n\n def lookup_icons(self):\n \"\"\" Search for the icons corresponding to the commands to execute. \"\"\"\n if self.default_icon is not None:\n if not os.path.isfile(self.default_icon):\n # if the default icon provided is not found, switch to\n # text mode\n self.default_icon = None\n for name in self.progs_name:\n self._lookup_icon(name)\n\n def get_icon_in_position(self, x, y):\n \"\"\" Determine which icon is clicked according to its position. \"\"\"\n for i in self.progs:\n if x < (self.icons_offsets[i] +\n self.icons_widths[self.progs[i]['name']] +\n self.padding / 2):\n return i\n\n def button_press(self, x, y, button):\n \"\"\" Launch the associated command to the clicked icon. \"\"\"\n if button == 1:\n icon = self.get_icon_in_position(x, y)\n if icon is not None:\n cmd = self.progs[icon]['cmd']\n if cmd.startswith('qshell:'):\n exec(cmd[7:].lstrip())\n else:\n self.qtile.cmd_spawn(cmd)\n self.draw()\n\n def draw(self):\n \"\"\" Draw the icons in the widget. 
\"\"\"\n self.drawer.clear(self.background or self.bar.background)\n xoffset = 0\n for i in sorted(self.progs.keys()):\n self.icons_offsets[i] = xoffset + self.padding\n name = self.progs[i]['name']\n icon_width = self.icons_widths[name]\n self.drawer.ctx.move_to(self.offset + xoffset, icon_width)\n self.drawer.clear(self.background or self.bar.background)\n if isinstance(self.surfaces[name], base._TextBox):\n # display the name if no icon was found and no default icon\n textbox = self.surfaces[name]\n textbox.layout.draw(\n self.padding + textbox.actual_padding,\n int((self.bar.height - textbox.layout.height) / 2.0) + 1\n )\n else:\n # display an icon\n self.drawer.ctx.set_source(self.surfaces[name])\n self.drawer.ctx.paint()\n self.drawer.draw(offsetx=self.offset + xoffset,\n width=icon_width + self.padding)\n xoffset += icon_width + self.padding\n\n def calculate_length(self):\n \"\"\" Compute the width of the widget according to each icon width. \"\"\"\n return sum(self.icons_widths[prg['name']] for prg in self.progs.values()) \\\n + self.padding * (len(self.progs) + 1)\n", "path": "libqtile/widget/launchbar.py" } ]
[ { "content": "# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2014 dequis\n# Copyright (c) 2014-2015 Joseph Razik\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2015 reus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nThis module define a widget that displays icons to launch softwares or commands\nwhen clicked -- a launchbar.\nOnly png icon files are displayed, not xpm because cairo doesn't support\nloading of xpm file.\nThe order of displaying (from left to right) is in the order of the list.\n\nIf no icon was found for the name provided and if default_icon is set to None\nthen the name is printed instead. If default_icon is defined then this icon is\ndisplayed instead.\n\nTo execute a software:\n - ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')\nTo execute a python command in qtile, begin with by 'qshell:'\n - ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')\n\n\n\"\"\"\nfrom libqtile import bar\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nimport os.path\nimport cairocffi\nfrom xdg.IconTheme import getIconPath\n\n\nclass LaunchBar(base._Widget):\n \"\"\"A widget that display icons to launch the associated command\n\n Widget requirements: pyxdg_.\n\n .. 
_pyxdg: https://freedesktop.org/wiki/Software/pyxdg/\n\n Parameters\n ==========\n progs :\n a list of tuples ``(software_name, command_to_execute, comment)``, for\n example::\n\n ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')\n ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n ('padding', 2, 'Padding between icons'),\n ('default_icon', '/usr/share/icons/oxygen/256x256/mimetypes/'\n 'application-x-executable.png', 'Default icon not found'),\n ]\n\n def __init__(self, progs=None, width=bar.CALCULATED, **config):\n base._Widget.__init__(self, width, **config)\n if progs is None:\n progs = []\n self.add_defaults(LaunchBar.defaults)\n self.surfaces = {}\n self.icons_files = {}\n self.icons_widths = {}\n self.icons_offsets = {}\n # For now, ignore the comments but may be one day it will be useful\n self.progs = dict(enumerate([{'name': prog[0], 'cmd': prog[1],\n 'comment': prog[2] if len(prog) > 2 else\n None} for prog in progs]))\n self.progs_name = set([prog['name'] for prog in self.progs.values()])\n self.length_type = bar.STATIC\n self.length = 0\n\n def _configure(self, qtile, pbar):\n base._Widget._configure(self, qtile, pbar)\n self.lookup_icons()\n self.setup_images()\n self.length = self.calculate_length()\n\n def setup_images(self):\n \"\"\" Create image structures for each icon files. \"\"\"\n for img_name, iconfile in self.icons_files.items():\n if iconfile is None:\n logger.warning(\n 'No icon found for application \"%s\" (%s) switch to text mode',\n img_name, iconfile)\n # if no icon is found and no default icon was set, we just\n # print the name, based on a textbox.\n textbox = base._TextBox()\n textbox._configure(self.qtile, self.bar)\n textbox.layout = self.drawer.textlayout(\n textbox.text,\n textbox.foreground,\n textbox.font,\n textbox.fontsize,\n textbox.fontshadow,\n markup=textbox.markup,\n )\n # the name will be displayed\n textbox.text = img_name\n textbox.calculate_length()\n self.icons_widths[img_name] = textbox.width\n self.surfaces[img_name] = textbox\n continue\n else:\n try:\n img = cairocffi.ImageSurface.create_from_png(iconfile)\n except cairocffi.Error:\n logger.exception('Error loading icon for application \"%s\" (%s)', img_name, iconfile)\n return\n\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / (self.bar.height - 4)\n width = int(input_width / sp)\n\n imgpat = cairocffi.SurfacePattern(img)\n scaler = cairocffi.Matrix()\n scaler.scale(sp, sp)\n scaler.translate(self.padding * -1, -2)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairocffi.FILTER_BEST)\n self.surfaces[img_name] = imgpat\n self.icons_widths[img_name] = width\n\n def _lookup_icon(self, name):\n \"\"\" Search for the icon corresponding to one command. 
\"\"\"\n self.icons_files[name] = None\n # if the software_name is directly an absolute path icon file\n if os.path.isabs(name):\n # name start with '/' thus it's an absolute path\n root, ext = os.path.splitext(name)\n if ext == '.png':\n self.icons_files[name] = name if os.path.isfile(name) else None\n else:\n # try to add the extension\n self.icons_files[name] = name + '.png' if os.path.isfile(name + '.png') else None\n else:\n self.icons_files[name] = getIconPath(name)\n # no search method found an icon, so default icon\n if self.icons_files[name] is None:\n self.icons_files[name] = self.default_icon\n\n def lookup_icons(self):\n \"\"\" Search for the icons corresponding to the commands to execute. \"\"\"\n if self.default_icon is not None:\n if not os.path.isfile(self.default_icon):\n # if the default icon provided is not found, switch to\n # text mode\n self.default_icon = None\n for name in self.progs_name:\n self._lookup_icon(name)\n\n def get_icon_in_position(self, x, y):\n \"\"\" Determine which icon is clicked according to its position. \"\"\"\n for i in self.progs:\n if x < (self.icons_offsets[i] +\n self.icons_widths[self.progs[i]['name']] +\n self.padding / 2):\n return i\n\n def button_press(self, x, y, button):\n \"\"\" Launch the associated command to the clicked icon. \"\"\"\n if button == 1:\n icon = self.get_icon_in_position(x, y)\n if icon is not None:\n cmd = self.progs[icon]['cmd']\n if cmd.startswith('qshell:'):\n exec(cmd[7:].lstrip())\n else:\n self.qtile.cmd_spawn(cmd)\n self.draw()\n\n def draw(self):\n \"\"\" Draw the icons in the widget. \"\"\"\n self.drawer.clear(self.background or self.bar.background)\n xoffset = 0\n for i in sorted(self.progs.keys()):\n self.icons_offsets[i] = xoffset + self.padding\n name = self.progs[i]['name']\n icon_width = self.icons_widths[name]\n self.drawer.ctx.move_to(self.offset + xoffset, icon_width)\n self.drawer.clear(self.background or self.bar.background)\n if isinstance(self.surfaces[name], base._TextBox):\n # display the name if no icon was found and no default icon\n textbox = self.surfaces[name]\n textbox.layout.draw(\n self.padding + textbox.actual_padding,\n int((self.bar.height - textbox.layout.height) / 2.0) + 1\n )\n else:\n # display an icon\n self.drawer.ctx.set_source(self.surfaces[name])\n self.drawer.ctx.paint()\n self.drawer.draw(offsetx=self.offset + xoffset,\n width=icon_width + self.padding)\n xoffset += icon_width + self.padding\n\n def calculate_length(self):\n \"\"\" Compute the width of the widget according to each icon width. \"\"\"\n return sum(self.icons_widths[prg['name']] for prg in self.progs.values()) \\\n + self.padding * (len(self.progs) + 1)\n", "path": "libqtile/widget/launchbar.py" } ]
diff --git a/libqtile/widget/launchbar.py b/libqtile/widget/launchbar.py index 838126a58f..cda36dc686 100644 --- a/libqtile/widget/launchbar.py +++ b/libqtile/widget/launchbar.py @@ -52,6 +52,10 @@ class LaunchBar(base._Widget): """A widget that display icons to launch the associated command + Widget requirements: pyxdg_. + + .. _pyxdg: https://freedesktop.org/wiki/Software/pyxdg/ + Parameters ========== progs :
cython__cython-6246
[BUG] Limited API: cythonize() method drops py_limited_api=True

### Describe the bug

In a setup.py file, if you use an explicit setuptools Extension with `py_limited_api=True` and then pass it to the cythonize() method, the resulting .so files incorrectly get a `.cpython-38-x86_64-linux-gnu.so` suffix (or similar), whereas the expected suffix is `.abi3.so`.

As far as I can tell, this is because the `py_limited_api` field is popped from the kwarg blob by setuptools [here](https://github.com/pypa/setuptools/blob/main/setuptools/extension.py#L129), and as a result, when Cython attempts to create a new Extension object [here](https://github.com/cython/cython/blob/d455d51bf31379f47c074e40517e24857d4d9cc0/Cython/Build/Dependencies.py#L866), the field isn't included.

It looks like the fix is to differentiate between *Extension_distutils* and *Extension_setuptools* and, in the latter case, read *py_limited_api*. The issue can be worked around by explicitly using Cython.Distutils.extension.Extension.

### Code to reproduce the behaviour:

_No response_

### Expected behaviour

_No response_

### OS

_No response_

### Python version

_No response_

### Cython version

_No response_

### Additional context

_No response_
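To make the failure mode concrete, here is a minimal, hedged sketch of such a setup.py; the module name and source path are hypothetical, and the comments restate the explanation above rather than a verified fix.

```python
# Hypothetical setup.py fragment illustrating the reported behaviour.
from setuptools import Extension, setup
from Cython.Build import cythonize

# setuptools pops py_limited_api from the constructor kwargs and keeps it
# only as an attribute on the Extension instance:
ext = Extension("mypkg.mymod", ["mypkg/mymod.pyx"], py_limited_api=True)
assert ext.py_limited_api is True

# When cythonize() rebuilds the Extension, the flag is not among the settings
# it copies (the distutils_settings table in Cython/Build/Dependencies.py has
# no py_limited_api entry), so the build falls back to a versioned
# .cpython-XY-*.so suffix instead of .abi3.so.
setup(name="mypkg", ext_modules=cythonize([ext]))

# Workaround mentioned above: build the extension with
# Cython.Distutils.extension.Extension instead of the setuptools class.
```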
[ { "content": "import cython\n\nimport collections\nimport os\nimport re, sys, time\nfrom glob import iglob\nfrom io import StringIO\nfrom os.path import relpath as _relpath\nfrom .Cache import Cache, FingerprintFlags\n\nfrom collections.abc import Iterable\n\ntry:\n import pythran\nexcept:\n pythran = None\n\nfrom .. import Utils\nfrom ..Utils import (cached_function, cached_method, path_exists,\n safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, write_depfile)\nfrom ..Compiler import Errors\nfrom ..Compiler.Main import Context\nfrom ..Compiler.Options import (CompilationOptions, default_options,\n get_directive_defaults)\n\njoin_path = cached_function(os.path.join)\ncopy_once_if_newer = cached_function(copy_file_to_dir_if_newer)\nsafe_makedirs_once = cached_function(safe_makedirs)\n\n\ndef _make_relative(file_paths, base=None):\n if not base:\n base = os.getcwd()\n if base[-1] != os.path.sep:\n base += os.path.sep\n return [_relpath(path, base) if path.startswith(base) else path\n for path in file_paths]\n\n\ndef extended_iglob(pattern):\n if '{' in pattern:\n m = re.match('(.*){([^}]+)}(.*)', pattern)\n if m:\n before, switch, after = m.groups()\n for case in switch.split(','):\n for path in extended_iglob(before + case + after):\n yield path\n return\n\n # We always accept '/' and also '\\' on Windows,\n # because '/' is generally common for relative paths.\n if '**/' in pattern or os.sep == '\\\\' and '**\\\\' in pattern:\n seen = set()\n first, rest = re.split(r'\\*\\*[%s]' % ('/\\\\\\\\' if os.sep == '\\\\' else '/'), pattern, 1)\n if first:\n first = iglob(first + os.sep)\n else:\n first = ['']\n for root in first:\n for path in extended_iglob(join_path(root, rest)):\n if path not in seen:\n seen.add(path)\n yield path\n for path in extended_iglob(join_path(root, '*', '**', rest)):\n if path not in seen:\n seen.add(path)\n yield path\n else:\n for path in iglob(pattern):\n yield path\n\n\ndef nonempty(it, error_msg=\"expected non-empty iterator\"):\n empty = True\n for value in it:\n empty = False\n yield value\n if empty:\n raise ValueError(error_msg)\n\n\ndef update_pythran_extension(ext):\n if pythran is None:\n raise RuntimeError(\"You first need to install Pythran to use the np_pythran directive.\")\n try:\n pythran_ext = pythran.config.make_extension(python=True)\n except TypeError: # older pythran version only\n pythran_ext = pythran.config.make_extension()\n\n ext.include_dirs.extend(pythran_ext['include_dirs'])\n ext.extra_compile_args.extend(pythran_ext['extra_compile_args'])\n ext.extra_link_args.extend(pythran_ext['extra_link_args'])\n ext.define_macros.extend(pythran_ext['define_macros'])\n ext.undef_macros.extend(pythran_ext['undef_macros'])\n ext.library_dirs.extend(pythran_ext['library_dirs'])\n ext.libraries.extend(pythran_ext['libraries'])\n ext.language = 'c++'\n\n # These options are not compatible with the way normal Cython extensions work\n for bad_option in [\"-fwhole-program\", \"-fvisibility=hidden\"]:\n try:\n ext.extra_compile_args.remove(bad_option)\n except ValueError:\n pass\n\n\ndef parse_list(s):\n \"\"\"\n >>> parse_list(\"\")\n []\n >>> parse_list(\"a\")\n ['a']\n >>> parse_list(\"a b c\")\n ['a', 'b', 'c']\n >>> parse_list(\"[a, b, c]\")\n ['a', 'b', 'c']\n >>> parse_list('a \" \" b')\n ['a', ' ', 'b']\n >>> parse_list('[a, \",a\", \"a,\", \",\", ]')\n ['a', ',a', 'a,', ',']\n \"\"\"\n if len(s) >= 2 and s[0] == '[' and s[-1] == ']':\n s = s[1:-1]\n delimiter = ','\n else:\n delimiter = ' '\n s, literals = strip_string_literals(s)\n def 
unquote(literal):\n literal = literal.strip()\n if literal[0] in \"'\\\"\":\n return literals[literal[1:-1]]\n else:\n return literal\n return [unquote(item) for item in s.split(delimiter) if item.strip()]\n\n\ntransitive_str = object()\ntransitive_list = object()\nbool_or = object()\n\ndistutils_settings = {\n 'name': str,\n 'sources': list,\n 'define_macros': list,\n 'undef_macros': list,\n 'libraries': transitive_list,\n 'library_dirs': transitive_list,\n 'runtime_library_dirs': transitive_list,\n 'include_dirs': transitive_list,\n 'extra_objects': list,\n 'extra_compile_args': transitive_list,\n 'extra_link_args': transitive_list,\n 'export_symbols': list,\n 'depends': transitive_list,\n 'language': transitive_str,\n 'np_pythran': bool_or\n}\n\n\ndef _legacy_strtobool(val):\n # Used to be \"distutils.util.strtobool\", adapted for deprecation warnings.\n if val == \"True\":\n return True\n elif val == \"False\":\n return False\n\n import warnings\n warnings.warn(\"The 'np_python' option requires 'True' or 'False'\", category=DeprecationWarning)\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n\n\nclass DistutilsInfo:\n\n def __init__(self, source=None, exn=None):\n self.values = {}\n if source is not None:\n source_lines = StringIO(source) if isinstance(source, str) else source\n for line in source_lines:\n line = line.lstrip()\n if not line:\n continue\n if line[0] != '#':\n break\n line = line[1:].lstrip()\n kind = next((k for k in (\"distutils:\",\"cython:\") if line.startswith(k)), None)\n if kind is not None:\n key, _, value = [s.strip() for s in line[len(kind):].partition('=')]\n type = distutils_settings.get(key, None)\n if line.startswith(\"cython:\") and type is None: continue\n if type in (list, transitive_list):\n value = parse_list(value)\n if key == 'define_macros':\n value = [tuple(macro.split('=', 1))\n if '=' in macro else (macro, None)\n for macro in value]\n if type is bool_or:\n value = _legacy_strtobool(value)\n self.values[key] = value\n elif exn is not None:\n for key in distutils_settings:\n if key in ('name', 'sources','np_pythran'):\n continue\n value = getattr(exn, key, None)\n if value:\n self.values[key] = value\n\n def merge(self, other):\n if other is None:\n return self\n for key, value in other.values.items():\n type = distutils_settings[key]\n if type is transitive_str and key not in self.values:\n self.values[key] = value\n elif type is transitive_list:\n if key in self.values:\n # Change a *copy* of the list (Trac #845)\n all = self.values[key][:]\n for v in value:\n if v not in all:\n all.append(v)\n value = all\n self.values[key] = value\n elif type is bool_or:\n self.values[key] = self.values.get(key, False) | value\n return self\n\n def subs(self, aliases):\n if aliases is None:\n return self\n resolved = DistutilsInfo()\n for key, value in self.values.items():\n type = distutils_settings[key]\n if type in [list, transitive_list]:\n new_value_list = []\n for v in value:\n if v in aliases:\n v = aliases[v]\n if isinstance(v, list):\n new_value_list += v\n else:\n new_value_list.append(v)\n value = new_value_list\n else:\n if value in aliases:\n value = aliases[value]\n resolved.values[key] = value\n return resolved\n\n def apply(self, extension):\n for key, value in self.values.items():\n type = distutils_settings[key]\n if type in [list, transitive_list]:\n value = getattr(extension, 
key) + list(value)\n setattr(extension, key, value)\n\n\n_FIND_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<comment> [#] ) |\n (?P<brace> [{}] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n_FIND_STRING_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<escape> [\\\\]+ ) (?P<escaped_quote> ['\"] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n_FIND_FSTRING_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<braces> [{]+ | [}]+ ) |\n (?P<escape> [\\\\]+ ) (?P<escaped_quote> ['\"] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n\ndef strip_string_literals(code: str, prefix: str = '__Pyx_L'):\n \"\"\"\n Normalizes every string literal to be of the form '__Pyx_Lxxx',\n returning the normalized code and a mapping of labels to\n string literals.\n \"\"\"\n new_code: list = []\n literals: dict = {}\n counter: cython.Py_ssize_t = 0\n find_token = _FIND_TOKEN\n\n def append_new_label(literal):\n nonlocal counter\n counter += 1\n label = f\"{prefix}{counter}_\"\n literals[label] = literal\n new_code.append(label)\n\n def parse_string(quote_type: str, start: cython.Py_ssize_t, is_fstring: cython.bint) -> cython.Py_ssize_t:\n charpos: cython.Py_ssize_t = start\n\n find_token = _FIND_FSTRING_TOKEN if is_fstring else _FIND_STRING_TOKEN\n\n while charpos != -1:\n token = find_token(code, charpos)\n if token is None:\n # This probably indicates an unclosed string literal, i.e. a broken file.\n append_new_label(code[start:])\n charpos = -1\n break\n charpos = token.end()\n\n if token['escape']:\n if len(token['escape']) % 2 == 0 and token['escaped_quote'] == quote_type[0]:\n # Quote is not actually escaped and might be part of a terminator, look at it next.\n charpos -= 1\n\n elif is_fstring and token['braces']:\n # Formats or brace(s) in fstring.\n if len(token['braces']) % 2 == 0:\n # Normal brace characters in string.\n continue\n if token['braces'][-1] == '{':\n if start < charpos-1:\n append_new_label(code[start : charpos-1])\n new_code.append('{')\n start = charpos = parse_code(charpos, in_fstring=True)\n\n elif token['quote'].startswith(quote_type):\n # Closing quote found (potentially together with further, unrelated quotes).\n charpos = token.start('quote')\n if charpos > start:\n append_new_label(code[start : charpos])\n new_code.append(quote_type)\n charpos += len(quote_type)\n break\n\n return charpos\n\n def parse_code(start: cython.Py_ssize_t, in_fstring: cython.bint = False) -> cython.Py_ssize_t:\n charpos: cython.Py_ssize_t = start\n end: cython.Py_ssize_t\n quote: str\n\n while charpos != -1:\n token = find_token(code, charpos)\n if token is None:\n new_code.append(code[start:])\n charpos = -1\n break\n charpos = end = token.end()\n\n if token['quote']:\n quote = token['quote']\n if len(quote) >= 6:\n # Ignore empty tripple-quoted strings: '''''' or \"\"\"\"\"\"\n quote = quote[:len(quote) % 6]\n if quote and len(quote) != 2:\n if len(quote) > 3:\n end -= len(quote) - 3\n quote = quote[:3]\n new_code.append(code[start:end])\n start = charpos = parse_string(quote, end, is_fstring=token['fstring'])\n\n elif token['comment']:\n new_code.append(code[start:end])\n charpos = code.find('\\n', end)\n append_new_label(code[end : charpos if charpos != -1 else None])\n if charpos == -1:\n break # EOF\n start = charpos\n\n elif in_fstring and token['brace']:\n if token['brace'] == '}':\n # Closing '}' of f-string.\n charpos = end = token.start() + 1\n new_code.append(code[start:end]) # with 
'}'\n break\n else:\n # Starting a calculated format modifier inside of an f-string format.\n end = token.start() + 1\n new_code.append(code[start:end]) # with '{'\n start = charpos = parse_code(end, in_fstring=True)\n\n return charpos\n\n parse_code(0)\n return \"\".join(new_code), literals\n\n\n# We need to allow spaces to allow for conditional compilation like\n# IF ...:\n# cimport ...\ndependency_regex = re.compile(r\"(?:^\\s*from +([0-9a-zA-Z_.]+) +cimport)|\"\n r\"(?:^\\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|\"\n r\"(?:^\\s*cdef +extern +from +['\\\"]([^'\\\"]+)['\\\"])|\"\n r\"(?:^\\s*include +['\\\"]([^'\\\"]+)['\\\"])\", re.M)\ndependency_after_from_regex = re.compile(\n r\"(?:^\\s+\\(([0-9a-zA-Z_., ]*)\\)[#\\n])|\"\n r\"(?:^\\s+([0-9a-zA-Z_., ]*)[#\\n])\",\n re.M)\n\n\ndef normalize_existing(base_path, rel_paths):\n return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))\n\n\n@cached_function\ndef normalize_existing0(base_dir, rel_paths):\n \"\"\"\n Given some base directory ``base_dir`` and a list of path names\n ``rel_paths``, normalize each relative path name ``rel`` by\n replacing it by ``os.path.join(base, rel)`` if that file exists.\n\n Return a couple ``(normalized, needed_base)`` where ``normalized``\n if the list of normalized file names and ``needed_base`` is\n ``base_dir`` if we actually needed ``base_dir``. If no paths were\n changed (for example, if all paths were already absolute), then\n ``needed_base`` is ``None``.\n \"\"\"\n normalized = []\n needed_base = None\n for rel in rel_paths:\n if os.path.isabs(rel):\n normalized.append(rel)\n continue\n path = join_path(base_dir, rel)\n if path_exists(path):\n normalized.append(os.path.normpath(path))\n needed_base = base_dir\n else:\n normalized.append(rel)\n return (normalized, needed_base)\n\n\ndef resolve_depends(depends, include_dirs):\n include_dirs = tuple(include_dirs)\n resolved = []\n for depend in depends:\n path = resolve_depend(depend, include_dirs)\n if path is not None:\n resolved.append(path)\n return resolved\n\n\n@cached_function\ndef resolve_depend(depend, include_dirs):\n if depend[0] == '<' and depend[-1] == '>':\n return None\n for dir in include_dirs:\n path = join_path(dir, depend)\n if path_exists(path):\n return os.path.normpath(path)\n return None\n\n\n@cached_function\ndef package(filename):\n dir = os.path.dirname(os.path.abspath(str(filename)))\n if dir != filename and is_package_dir(dir):\n return package(dir) + (os.path.basename(dir),)\n else:\n return ()\n\n\n@cached_function\ndef fully_qualified_name(filename):\n module = os.path.splitext(os.path.basename(filename))[0]\n return '.'.join(package(filename) + (module,))\n\n\n@cached_function\ndef parse_dependencies(source_filename):\n # Actual parsing is way too slow, so we use regular expressions.\n # The only catch is that we must strip comments and string\n # literals ahead of time.\n with Utils.open_source_file(source_filename, error_handling='ignore') as fh:\n source = fh.read()\n distutils_info = DistutilsInfo(source)\n source, literals = strip_string_literals(source)\n source = source.replace('\\\\\\n', ' ').replace('\\t', ' ')\n\n # TODO: pure mode\n cimports = []\n includes = []\n externs = []\n for m in dependency_regex.finditer(source):\n cimport_from, cimport_list, extern, include = m.groups()\n if cimport_from:\n cimports.append(cimport_from)\n m_after_from = dependency_after_from_regex.search(source, pos=m.end())\n if m_after_from:\n multiline, one_line = m_after_from.groups()\n subimports 
= multiline or one_line\n cimports.extend(\"{}.{}\".format(cimport_from, s.strip())\n for s in subimports.split(','))\n\n elif cimport_list:\n cimports.extend(x.strip() for x in cimport_list.split(\",\"))\n elif extern:\n externs.append(literals[extern])\n else:\n includes.append(literals[include])\n return cimports, includes, externs, distutils_info\n\n\nclass DependencyTree:\n\n def __init__(self, context, quiet=False):\n self.context = context\n self.quiet = quiet\n self._transitive_cache = {}\n\n def parse_dependencies(self, source_filename):\n if path_exists(source_filename):\n source_filename = os.path.normpath(source_filename)\n return parse_dependencies(source_filename)\n\n @cached_method\n def included_files(self, filename):\n # This is messy because included files are textually included, resolving\n # cimports (but not includes) relative to the including file.\n all = set()\n for include in self.parse_dependencies(filename)[1]:\n include_path = join_path(os.path.dirname(filename), include)\n if not path_exists(include_path):\n include_path = self.context.find_include_file(include, source_file_path=filename)\n if include_path:\n if '.' + os.path.sep in include_path:\n include_path = os.path.normpath(include_path)\n all.add(include_path)\n all.update(self.included_files(include_path))\n elif not self.quiet:\n print(\"Unable to locate '%s' referenced from '%s'\" % (filename, include))\n return all\n\n @cached_method\n def cimports_externs_incdirs(self, filename):\n # This is really ugly. Nested cimports are resolved with respect to the\n # includer, but includes are resolved with respect to the includee.\n cimports, includes, externs = self.parse_dependencies(filename)[:3]\n cimports = set(cimports)\n externs = set(externs)\n incdirs = set()\n for include in self.included_files(filename):\n included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)\n cimports.update(included_cimports)\n externs.update(included_externs)\n incdirs.update(included_incdirs)\n externs, incdir = normalize_existing(filename, externs)\n if incdir:\n incdirs.add(incdir)\n return tuple(cimports), externs, incdirs\n\n def cimports(self, filename):\n return self.cimports_externs_incdirs(filename)[0]\n\n def package(self, filename):\n return package(filename)\n\n def fully_qualified_name(self, filename):\n return fully_qualified_name(filename)\n\n @cached_method\n def find_pxd(self, module, filename=None):\n is_relative = module[0] == '.'\n if is_relative and not filename:\n raise NotImplementedError(\"New relative imports.\")\n if filename is not None:\n module_path = module.split('.')\n if is_relative:\n module_path.pop(0) # just explicitly relative\n package_path = list(self.package(filename))\n while module_path and not module_path[0]:\n try:\n package_path.pop()\n except IndexError:\n return None # FIXME: error?\n module_path.pop(0)\n relative = '.'.join(package_path + module_path)\n pxd = self.context.find_pxd_file(relative, source_file_path=filename)\n if pxd:\n return pxd\n if is_relative:\n return None # FIXME: error?\n return self.context.find_pxd_file(module, source_file_path=filename)\n\n @cached_method\n def cimported_files(self, filename):\n filename_root, filename_ext = os.path.splitext(filename)\n if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'):\n pxd_list = [filename_root + '.pxd']\n else:\n pxd_list = []\n # Cimports generates all possible combinations package.module\n # when imported as from package cimport module.\n for module in 
self.cimports(filename):\n if module[:7] == 'cython.' or module == 'cython':\n continue\n pxd_file = self.find_pxd(module, filename)\n if pxd_file is not None:\n pxd_list.append(pxd_file)\n return tuple(pxd_list)\n\n @cached_method\n def immediate_dependencies(self, filename):\n all_deps = {filename}\n all_deps.update(self.cimported_files(filename))\n all_deps.update(self.included_files(filename))\n return all_deps\n\n def all_dependencies(self, filename):\n return self.transitive_merge(filename, self.immediate_dependencies, set.union)\n\n @cached_method\n def timestamp(self, filename):\n return os.path.getmtime(filename)\n\n def extract_timestamp(self, filename):\n return self.timestamp(filename), filename\n\n def newest_dependency(self, filename):\n return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])\n\n def distutils_info0(self, filename):\n info = self.parse_dependencies(filename)[3]\n kwds = info.values\n cimports, externs, incdirs = self.cimports_externs_incdirs(filename)\n basedir = os.getcwd()\n # Add dependencies on \"cdef extern from ...\" files\n if externs:\n externs = _make_relative(externs, basedir)\n if 'depends' in kwds:\n kwds['depends'] = list(set(kwds['depends']).union(externs))\n else:\n kwds['depends'] = list(externs)\n # Add include_dirs to ensure that the C compiler will find the\n # \"cdef extern from ...\" files\n if incdirs:\n include_dirs = list(kwds.get('include_dirs', []))\n for inc in _make_relative(incdirs, basedir):\n if inc not in include_dirs:\n include_dirs.append(inc)\n kwds['include_dirs'] = include_dirs\n return info\n\n def distutils_info(self, filename, aliases=None, base=None):\n return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)\n .subs(aliases)\n .merge(base))\n\n def transitive_merge(self, node, extract, merge):\n try:\n seen = self._transitive_cache[extract, merge]\n except KeyError:\n seen = self._transitive_cache[extract, merge] = {}\n return self.transitive_merge_helper(\n node, extract, merge, seen, {}, self.cimported_files)[0]\n\n def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):\n if node in seen:\n return seen[node], None\n deps = extract(node)\n if node in stack:\n return deps, node\n try:\n stack[node] = len(stack)\n loop = None\n for next in outgoing(node):\n sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)\n if sub_loop is not None:\n if loop is not None and stack[loop] < stack[sub_loop]:\n pass\n else:\n loop = sub_loop\n deps = merge(deps, sub_deps)\n if loop == node:\n loop = None\n if loop is None:\n seen[node] = deps\n return deps, loop\n finally:\n del stack[node]\n\n\n_dep_tree = None\n\ndef create_dependency_tree(ctx=None, quiet=False):\n global _dep_tree\n if _dep_tree is None:\n if ctx is None:\n ctx = Context([\".\"], get_directive_defaults(),\n options=CompilationOptions(default_options))\n _dep_tree = DependencyTree(ctx, quiet=quiet)\n return _dep_tree\n\n\n# If this changes, change also docs/src/reference/compilation.rst\n# which mentions this function\ndef default_create_extension(template, kwds):\n if 'depends' in kwds:\n include_dirs = kwds.get('include_dirs', []) + [\".\"]\n depends = resolve_depends(kwds['depends'], include_dirs)\n kwds['depends'] = sorted(set(depends + template.depends))\n\n t = template.__class__\n ext = t(**kwds)\n metadata = dict(distutils=kwds, module_name=kwds['name'])\n return (ext, metadata)\n\n\n# This may be useful for advanced users?\ndef 
create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,\n exclude_failures=False):\n if language is not None:\n print('Warning: passing language={0!r} to cythonize() is deprecated. '\n 'Instead, put \"# distutils: language={0}\" in your .pyx or .pxd file(s)'.format(language))\n if exclude is None:\n exclude = []\n if patterns is None:\n return [], {}\n elif isinstance(patterns, str) or not isinstance(patterns, Iterable):\n patterns = [patterns]\n\n from distutils.extension import Extension\n if 'setuptools' in sys.modules:\n # Support setuptools Extension instances as well.\n extension_classes = (\n Extension, # should normally be the same as 'setuptools.extension._Extension'\n sys.modules['setuptools.extension']._Extension,\n sys.modules['setuptools'].Extension,\n )\n else:\n extension_classes = (Extension,)\n\n explicit_modules = {m.name for m in patterns if isinstance(m, extension_classes)}\n deps = create_dependency_tree(ctx, quiet=quiet)\n\n to_exclude = set()\n if not isinstance(exclude, list):\n exclude = [exclude]\n for pattern in exclude:\n to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))\n\n module_list = []\n module_metadata = {}\n\n # if no create_extension() function is defined, use a simple\n # default function.\n create_extension = ctx.options.create_extension or default_create_extension\n\n seen = set()\n for pattern in patterns:\n if isinstance(pattern, str):\n filepattern = pattern\n template = Extension(pattern, []) # Fake Extension without sources\n name = '*'\n base = None\n ext_language = language\n elif isinstance(pattern, extension_classes):\n cython_sources = [s for s in pattern.sources\n if os.path.splitext(s)[1] in ('.py', '.pyx')]\n if cython_sources:\n filepattern = cython_sources[0]\n if len(cython_sources) > 1:\n print(\"Warning: Multiple cython sources found for extension '%s': %s\\n\"\n \"See https://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html \"\n \"for sharing declarations among Cython files.\" % (pattern.name, cython_sources))\n else:\n # ignore non-cython modules\n module_list.append(pattern)\n continue\n template = pattern\n name = template.name\n base = DistutilsInfo(exn=template)\n ext_language = None # do not override whatever the Extension says\n else:\n msg = str(\"pattern is not of type str nor subclass of Extension (%s)\"\n \" but of type %s and class %s\" % (repr(Extension),\n type(pattern),\n pattern.__class__))\n raise TypeError(msg)\n\n for file in nonempty(sorted(extended_iglob(filepattern)), \"'%s' doesn't match any files\" % filepattern):\n if os.path.abspath(file) in to_exclude:\n continue\n module_name = deps.fully_qualified_name(file)\n if '*' in name:\n if module_name in explicit_modules:\n continue\n elif name:\n module_name = name\n\n Utils.raise_error_if_module_name_forbidden(module_name)\n\n if module_name not in seen:\n try:\n kwds = deps.distutils_info(file, aliases, base).values\n except Exception:\n if exclude_failures:\n continue\n raise\n if base is not None:\n for key, value in base.values.items():\n if key not in kwds:\n kwds[key] = value\n\n kwds['name'] = module_name\n\n sources = [file] + [m for m in template.sources if m != filepattern]\n if 'sources' in kwds:\n # allow users to add .c files etc.\n for source in kwds['sources']:\n if source not in sources:\n sources.append(source)\n kwds['sources'] = sources\n\n if ext_language and 'language' not in kwds:\n kwds['language'] = ext_language\n\n np_pythran = kwds.pop('np_pythran', 
False)\n\n # Create the new extension\n m, metadata = create_extension(template, kwds)\n m.np_pythran = np_pythran or getattr(m, 'np_pythran', False)\n if m.np_pythran:\n update_pythran_extension(m)\n module_list.append(m)\n\n # Store metadata (this will be written as JSON in the\n # generated C file but otherwise has no purpose)\n module_metadata[module_name] = metadata\n\n if file not in m.sources:\n # Old setuptools unconditionally replaces .pyx with .c/.cpp\n target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c')\n try:\n m.sources.remove(target_file)\n except ValueError:\n # never seen this in the wild, but probably better to warn about this unexpected case\n print(\"Warning: Cython source file not found in sources list, adding %s\" % file)\n m.sources.insert(0, file)\n seen.add(name)\n return module_list, module_metadata\n\n\n# This is the user-exposed entry point.\ndef cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=None, language=None,\n exclude_failures=False, show_all_warnings=False, **options):\n \"\"\"\n Compile a set of source modules into C/C++ files and return a list of distutils\n Extension objects for them.\n\n :param module_list: As module list, pass either a glob pattern, a list of glob\n patterns or a list of Extension objects. The latter\n allows you to configure the extensions separately\n through the normal distutils options.\n You can also pass Extension objects that have\n glob patterns as their sources. Then, cythonize\n will resolve the pattern and create a\n copy of the Extension for every matching file.\n\n :param exclude: When passing glob patterns as ``module_list``, you can exclude certain\n module names explicitly by passing them into the ``exclude`` option.\n\n :param nthreads: The number of concurrent builds for parallel compilation\n (requires the ``multiprocessing`` module).\n\n :param aliases: If you want to use compiler directives like ``# distutils: ...`` but\n can only know at compile time (when running the ``setup.py``) which values\n to use, you can use aliases and pass a dictionary mapping those aliases\n to Python strings when calling :func:`cythonize`. As an example, say you\n want to use the compiler\n directive ``# distutils: include_dirs = ../static_libs/include/``\n but this path isn't always fixed and you want to find it when running\n the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,\n find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python\n variable called ``foo`` as a string, and then call\n ``cythonize(..., aliases={'MY_HEADERS': foo})``.\n\n :param quiet: If True, Cython won't print error, warning, or status messages during the\n compilation.\n\n :param force: Forces the recompilation of the Cython modules, even if the timestamps\n don't indicate that a recompilation is necessary.\n\n :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this\n will be determined at a per-file level based on compiler directives. This\n affects only modules found based on file names. Extension instances passed\n into :func:`cythonize` will not be changed. It is recommended to rather\n use the compiler directive ``# distutils: language = c++`` than this option.\n\n :param exclude_failures: For a broad 'try to compile' mode that ignores compilation\n failures and simply excludes the failed extensions,\n pass ``exclude_failures=True``. 
Note that this only\n really makes sense for compiling ``.py`` files which can also\n be used without compilation.\n\n :param show_all_warnings: By default, not all Cython warnings are printed.\n Set to true to show all warnings.\n\n :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py``\n files compiled. The HTML file gives an indication\n of how much Python interaction there is in\n each of the source code lines, compared to plain C code.\n It also allows you to see the C/C++ code\n generated for each line of Cython code. This report is invaluable when\n optimizing a function for speed,\n and for determining when to :ref:`release the GIL <nogil>`:\n in general, a ``nogil`` block may contain only \"white\" code.\n See examples in :ref:`determining_where_to_add_types` or\n :ref:`primes`.\n\n\n :param annotate-fullc: If ``True`` will produce a colorized HTML version of\n the source which includes entire generated C/C++-code.\n\n\n :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:\n ``compiler_directives={'embedsignature': True}``.\n See :ref:`compiler-directives`.\n\n :param depfile: produce depfiles for the sources if True.\n :param cache: If ``True`` the cache enabled with default path. If the value is a path to a directory,\n then the directory is used to cache generated ``.c``/``.cpp`` files. By default cache is disabled.\n See :ref:`cython-cache`.\n \"\"\"\n if exclude is None:\n exclude = []\n if 'include_path' not in options:\n options['include_path'] = ['.']\n if 'common_utility_include_dir' in options:\n safe_makedirs(options['common_utility_include_dir'])\n\n depfile = options.pop('depfile', None)\n\n if pythran is None:\n pythran_options = None\n else:\n pythran_options = CompilationOptions(**options)\n pythran_options.cplus = True\n pythran_options.np_pythran = True\n\n if force is None:\n force = os.environ.get(\"CYTHON_FORCE_REGEN\") == \"1\" # allow global overrides for build systems\n\n c_options = CompilationOptions(**options)\n cpp_options = CompilationOptions(**options); cpp_options.cplus = True\n ctx = Context.from_options(c_options)\n options = c_options\n module_list, module_metadata = create_extension_list(\n module_list,\n exclude=exclude,\n ctx=ctx,\n quiet=quiet,\n exclude_failures=exclude_failures,\n language=language,\n aliases=aliases)\n\n fix_windows_unicode_modules(module_list)\n\n deps = create_dependency_tree(ctx, quiet=quiet)\n build_dir = getattr(options, 'build_dir', None)\n if options.cache:\n # cache is enabled when:\n # * options.cache is True (the default path to the cache base dir is used)\n # * options.cache is the explicit path to the cache base dir\n cache_path = None if options.cache is True else options.cache\n cache = Cache(cache_path, getattr(options, 'cache_size', None))\n else:\n cache = None\n\n def copy_to_build_dir(filepath, root=os.getcwd()):\n filepath_abs = os.path.abspath(filepath)\n if os.path.isabs(filepath):\n filepath = filepath_abs\n if filepath_abs.startswith(root):\n # distutil extension depends are relative to cwd\n mod_dir = join_path(build_dir,\n os.path.dirname(_relpath(filepath, root)))\n copy_once_if_newer(filepath_abs, mod_dir)\n\n modules_by_cfile = collections.defaultdict(list)\n to_compile = []\n for m in module_list:\n if build_dir:\n for dep in m.depends:\n copy_to_build_dir(dep)\n\n cy_sources = [\n source for source in m.sources\n if os.path.splitext(source)[1] in ('.pyx', '.py')]\n if len(cy_sources) == 1:\n # normal \"special\" case: 
believe the Extension module name to allow user overrides\n full_module_name = m.name\n else:\n # infer FQMN from source files\n full_module_name = None\n\n new_sources = []\n for source in m.sources:\n base, ext = os.path.splitext(source)\n if ext in ('.pyx', '.py'):\n if m.np_pythran:\n c_file = base + '.cpp'\n options = pythran_options\n elif m.language == 'c++':\n c_file = base + '.cpp'\n options = cpp_options\n else:\n c_file = base + '.c'\n options = c_options\n\n # setup for out of place build directory if enabled\n if build_dir:\n if os.path.isabs(c_file):\n c_file = os.path.splitdrive(c_file)[1]\n c_file = c_file.split(os.sep, 1)[1]\n c_file = os.path.join(build_dir, c_file)\n dir = os.path.dirname(c_file)\n safe_makedirs_once(dir)\n\n # write out the depfile, if requested\n if depfile:\n dependencies = deps.all_dependencies(source)\n write_depfile(c_file, source, dependencies)\n\n # Missing files and those generated by other Cython versions should always be recreated.\n if Utils.file_generated_by_this_cython(c_file):\n c_timestamp = os.path.getmtime(c_file)\n else:\n c_timestamp = -1\n\n # Priority goes first to modified files, second to direct\n # dependents, and finally to indirect dependents.\n if c_timestamp < deps.timestamp(source):\n dep_timestamp, dep = deps.timestamp(source), source\n priority = 0\n else:\n dep_timestamp, dep = deps.newest_dependency(source)\n priority = 2 - (dep in deps.immediate_dependencies(source))\n if force or c_timestamp < dep_timestamp:\n if not quiet and not force:\n if source == dep:\n print(\"Compiling %s because it changed.\" % Utils.decode_filename(source))\n else:\n print(\"Compiling %s because it depends on %s.\" % (\n Utils.decode_filename(source),\n Utils.decode_filename(dep),\n ))\n if not force and cache:\n fingerprint = cache.transitive_fingerprint(\n source, deps.all_dependencies(source), options,\n FingerprintFlags(\n m.language or 'c',\n getattr(m, 'py_limited_api', False),\n getattr(m, 'np_pythran', False)\n )\n )\n else:\n fingerprint = None\n to_compile.append((\n priority, source, c_file, fingerprint, cache, quiet,\n options, not exclude_failures, module_metadata.get(m.name),\n full_module_name, show_all_warnings))\n new_sources.append(c_file)\n modules_by_cfile[c_file].append(m)\n else:\n new_sources.append(source)\n if build_dir:\n copy_to_build_dir(source)\n m.sources = new_sources\n\n to_compile.sort()\n # Drop \"priority\" component of \"to_compile\" entries and add a\n # simple progress indicator.\n N = len(to_compile)\n progress_fmt = \"[{0:%d}/{1}] \" % len(str(N))\n for i in range(N):\n progress = progress_fmt.format(i+1, N)\n to_compile[i] = to_compile[i][1:] + (progress,)\n\n if N <= 1:\n nthreads = 0\n if nthreads:\n import multiprocessing\n pool = multiprocessing.Pool(\n nthreads, initializer=_init_multiprocessing_helper)\n # This is a bit more involved than it should be, because KeyboardInterrupts\n # break the multiprocessing workers when using a normal pool.map().\n # See, for example:\n # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt\n try:\n result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)\n pool.close()\n while not result.ready():\n try:\n result.get(99999) # seconds\n except multiprocessing.TimeoutError:\n pass\n except KeyboardInterrupt:\n pool.terminate()\n raise\n pool.join()\n else:\n for args in to_compile:\n cythonize_one(*args)\n\n if exclude_failures:\n failed_modules = set()\n for c_file, modules in modules_by_cfile.items():\n if not os.path.exists(c_file):\n 
failed_modules.update(modules)\n elif os.path.getsize(c_file) < 200:\n f = open(c_file, 'r', encoding='iso8859-1')\n try:\n if f.read(len('#error ')) == '#error ':\n # dead compilation result\n failed_modules.update(modules)\n finally:\n f.close()\n if failed_modules:\n for module in failed_modules:\n module_list.remove(module)\n print(\"Failed compilations: %s\" % ', '.join(sorted([\n module.name for module in failed_modules])))\n\n if cache:\n cache.cleanup_cache()\n\n # cythonize() is often followed by the (non-Python-buffered)\n # compiler output, flush now to avoid interleaving output.\n sys.stdout.flush()\n return module_list\n\n\ndef fix_windows_unicode_modules(module_list):\n # Hack around a distutils 3.[5678] bug on Windows for unicode module names.\n # https://bugs.python.org/issue39432\n if sys.platform != \"win32\":\n return\n if sys.version_info >= (3, 8, 2):\n return\n\n def make_filtered_list(ignored_symbol, old_entries):\n class FilteredExportSymbols(list):\n # export_symbols for unicode filename cause link errors on Windows\n # Cython doesn't need them (it already defines PyInit with the correct linkage)\n # so use this class as a temporary fix to stop them from being generated\n def __contains__(self, val):\n # so distutils doesn't \"helpfully\" add PyInit_<name>\n return val == ignored_symbol or list.__contains__(self, val)\n\n filtered_list = FilteredExportSymbols(old_entries)\n if old_entries:\n filtered_list.extend(name for name in old_entries if name != ignored_symbol)\n return filtered_list\n\n for m in module_list:\n if m.name.isascii():\n continue\n m.export_symbols = make_filtered_list(\n \"PyInit_\" + m.name.rsplit(\".\", 1)[-1],\n m.export_symbols,\n )\n\n\nif os.environ.get('XML_RESULTS'):\n compile_result_dir = os.environ['XML_RESULTS']\n def record_results(func):\n def with_record(*args):\n t = time.time()\n success = True\n try:\n try:\n func(*args)\n except:\n success = False\n finally:\n t = time.time() - t\n module = fully_qualified_name(args[0])\n name = \"cythonize.\" + module\n failures = 1 - success\n if success:\n failure_item = \"\"\n else:\n failure_item = \"failure\"\n output = open(os.path.join(compile_result_dir, name + \".xml\"), \"w\")\n output.write(\"\"\"\n <?xml version=\"1.0\" ?>\n <testsuite name=\"%(name)s\" errors=\"0\" failures=\"%(failures)s\" tests=\"1\" time=\"%(t)s\">\n <testcase classname=\"%(name)s\" name=\"cythonize\">\n %(failure_item)s\n </testcase>\n </testsuite>\n \"\"\".strip() % locals())\n output.close()\n return with_record\nelse:\n def record_results(func):\n return func\n\n\n# TODO: Share context? 
Issue: pyx processing leaks into pxd module\n@record_results\ndef cythonize_one(pyx_file, c_file, fingerprint, cache, quiet, options=None,\n raise_on_failure=True, embedded_metadata=None,\n full_module_name=None, show_all_warnings=False,\n progress=\"\"):\n from ..Compiler.Main import compile_single, default_options\n from ..Compiler.Errors import CompileError, PyrexError\n\n if cache and fingerprint:\n cached = cache.lookup_cache(c_file, fingerprint)\n if cached:\n if not quiet:\n print(\"%sFound compiled %s in cache\" % (progress, pyx_file))\n cache.load_from_cache(c_file, cached)\n return\n if not quiet:\n print(\"%sCythonizing %s\" % (progress, Utils.decode_filename(pyx_file)))\n if options is None:\n options = CompilationOptions(default_options)\n options.output_file = c_file\n options.embedded_metadata = embedded_metadata\n\n old_warning_level = Errors.LEVEL\n if show_all_warnings:\n Errors.LEVEL = 0\n\n any_failures = 0\n try:\n result = compile_single(pyx_file, options, full_module_name=full_module_name)\n if result.num_errors > 0:\n any_failures = 1\n except (OSError, PyrexError) as e:\n sys.stderr.write('%s\\n' % e)\n any_failures = 1\n # XXX\n import traceback\n traceback.print_exc()\n except Exception:\n if raise_on_failure:\n raise\n import traceback\n traceback.print_exc()\n any_failures = 1\n finally:\n if show_all_warnings:\n Errors.LEVEL = old_warning_level\n\n if any_failures:\n if raise_on_failure:\n raise CompileError(None, pyx_file)\n elif os.path.exists(c_file):\n os.remove(c_file)\n elif cache and fingerprint:\n cache.store_to_cache(c_file, fingerprint, result)\n\n\ndef cythonize_one_helper(m):\n import traceback\n try:\n return cythonize_one(*m)\n except Exception:\n traceback.print_exc()\n raise\n\n\ndef _init_multiprocessing_helper():\n # KeyboardInterrupt kills workers, so don't let them get it\n import signal\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n", "path": "Cython/Build/Dependencies.py" } ]
[ { "content": "import cython\n\nimport collections\nimport os\nimport re, sys, time\nfrom glob import iglob\nfrom io import StringIO\nfrom os.path import relpath as _relpath\nfrom .Cache import Cache, FingerprintFlags\n\nfrom collections.abc import Iterable\n\ntry:\n import pythran\nexcept:\n pythran = None\n\nfrom .. import Utils\nfrom ..Utils import (cached_function, cached_method, path_exists,\n safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, write_depfile)\nfrom ..Compiler import Errors\nfrom ..Compiler.Main import Context\nfrom ..Compiler.Options import (CompilationOptions, default_options,\n get_directive_defaults)\n\njoin_path = cached_function(os.path.join)\ncopy_once_if_newer = cached_function(copy_file_to_dir_if_newer)\nsafe_makedirs_once = cached_function(safe_makedirs)\n\n\ndef _make_relative(file_paths, base=None):\n if not base:\n base = os.getcwd()\n if base[-1] != os.path.sep:\n base += os.path.sep\n return [_relpath(path, base) if path.startswith(base) else path\n for path in file_paths]\n\n\ndef extended_iglob(pattern):\n if '{' in pattern:\n m = re.match('(.*){([^}]+)}(.*)', pattern)\n if m:\n before, switch, after = m.groups()\n for case in switch.split(','):\n for path in extended_iglob(before + case + after):\n yield path\n return\n\n # We always accept '/' and also '\\' on Windows,\n # because '/' is generally common for relative paths.\n if '**/' in pattern or os.sep == '\\\\' and '**\\\\' in pattern:\n seen = set()\n first, rest = re.split(r'\\*\\*[%s]' % ('/\\\\\\\\' if os.sep == '\\\\' else '/'), pattern, 1)\n if first:\n first = iglob(first + os.sep)\n else:\n first = ['']\n for root in first:\n for path in extended_iglob(join_path(root, rest)):\n if path not in seen:\n seen.add(path)\n yield path\n for path in extended_iglob(join_path(root, '*', '**', rest)):\n if path not in seen:\n seen.add(path)\n yield path\n else:\n for path in iglob(pattern):\n yield path\n\n\ndef nonempty(it, error_msg=\"expected non-empty iterator\"):\n empty = True\n for value in it:\n empty = False\n yield value\n if empty:\n raise ValueError(error_msg)\n\n\ndef update_pythran_extension(ext):\n if pythran is None:\n raise RuntimeError(\"You first need to install Pythran to use the np_pythran directive.\")\n try:\n pythran_ext = pythran.config.make_extension(python=True)\n except TypeError: # older pythran version only\n pythran_ext = pythran.config.make_extension()\n\n ext.include_dirs.extend(pythran_ext['include_dirs'])\n ext.extra_compile_args.extend(pythran_ext['extra_compile_args'])\n ext.extra_link_args.extend(pythran_ext['extra_link_args'])\n ext.define_macros.extend(pythran_ext['define_macros'])\n ext.undef_macros.extend(pythran_ext['undef_macros'])\n ext.library_dirs.extend(pythran_ext['library_dirs'])\n ext.libraries.extend(pythran_ext['libraries'])\n ext.language = 'c++'\n\n # These options are not compatible with the way normal Cython extensions work\n for bad_option in [\"-fwhole-program\", \"-fvisibility=hidden\"]:\n try:\n ext.extra_compile_args.remove(bad_option)\n except ValueError:\n pass\n\n\ndef parse_list(s):\n \"\"\"\n >>> parse_list(\"\")\n []\n >>> parse_list(\"a\")\n ['a']\n >>> parse_list(\"a b c\")\n ['a', 'b', 'c']\n >>> parse_list(\"[a, b, c]\")\n ['a', 'b', 'c']\n >>> parse_list('a \" \" b')\n ['a', ' ', 'b']\n >>> parse_list('[a, \",a\", \"a,\", \",\", ]')\n ['a', ',a', 'a,', ',']\n \"\"\"\n if len(s) >= 2 and s[0] == '[' and s[-1] == ']':\n s = s[1:-1]\n delimiter = ','\n else:\n delimiter = ' '\n s, literals = strip_string_literals(s)\n def 
unquote(literal):\n literal = literal.strip()\n if literal[0] in \"'\\\"\":\n return literals[literal[1:-1]]\n else:\n return literal\n return [unquote(item) for item in s.split(delimiter) if item.strip()]\n\n\ntransitive_str = object()\ntransitive_list = object()\nbool_or = object()\n\ndistutils_settings = {\n 'name': str,\n 'sources': list,\n 'define_macros': list,\n 'undef_macros': list,\n 'libraries': transitive_list,\n 'library_dirs': transitive_list,\n 'runtime_library_dirs': transitive_list,\n 'include_dirs': transitive_list,\n 'extra_objects': list,\n 'extra_compile_args': transitive_list,\n 'extra_link_args': transitive_list,\n 'export_symbols': list,\n 'depends': transitive_list,\n 'language': transitive_str,\n 'np_pythran': bool_or\n}\n\n\ndef _legacy_strtobool(val):\n # Used to be \"distutils.util.strtobool\", adapted for deprecation warnings.\n if val == \"True\":\n return True\n elif val == \"False\":\n return False\n\n import warnings\n warnings.warn(\"The 'np_python' option requires 'True' or 'False'\", category=DeprecationWarning)\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n\n\nclass DistutilsInfo:\n\n def __init__(self, source=None, exn=None):\n self.values = {}\n if source is not None:\n source_lines = StringIO(source) if isinstance(source, str) else source\n for line in source_lines:\n line = line.lstrip()\n if not line:\n continue\n if line[0] != '#':\n break\n line = line[1:].lstrip()\n kind = next((k for k in (\"distutils:\",\"cython:\") if line.startswith(k)), None)\n if kind is not None:\n key, _, value = [s.strip() for s in line[len(kind):].partition('=')]\n type = distutils_settings.get(key, None)\n if line.startswith(\"cython:\") and type is None: continue\n if type in (list, transitive_list):\n value = parse_list(value)\n if key == 'define_macros':\n value = [tuple(macro.split('=', 1))\n if '=' in macro else (macro, None)\n for macro in value]\n if type is bool_or:\n value = _legacy_strtobool(value)\n self.values[key] = value\n elif exn is not None:\n for key in distutils_settings:\n if key in ('name', 'sources','np_pythran'):\n continue\n value = getattr(exn, key, None)\n if value:\n self.values[key] = value\n\n def merge(self, other):\n if other is None:\n return self\n for key, value in other.values.items():\n type = distutils_settings[key]\n if type is transitive_str and key not in self.values:\n self.values[key] = value\n elif type is transitive_list:\n if key in self.values:\n # Change a *copy* of the list (Trac #845)\n all = self.values[key][:]\n for v in value:\n if v not in all:\n all.append(v)\n value = all\n self.values[key] = value\n elif type is bool_or:\n self.values[key] = self.values.get(key, False) | value\n return self\n\n def subs(self, aliases):\n if aliases is None:\n return self\n resolved = DistutilsInfo()\n for key, value in self.values.items():\n type = distutils_settings[key]\n if type in [list, transitive_list]:\n new_value_list = []\n for v in value:\n if v in aliases:\n v = aliases[v]\n if isinstance(v, list):\n new_value_list += v\n else:\n new_value_list.append(v)\n value = new_value_list\n else:\n if value in aliases:\n value = aliases[value]\n resolved.values[key] = value\n return resolved\n\n def apply(self, extension):\n for key, value in self.values.items():\n type = distutils_settings[key]\n if type in [list, transitive_list]:\n value = getattr(extension, 
key) + list(value)\n setattr(extension, key, value)\n\n\n_FIND_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<comment> [#] ) |\n (?P<brace> [{}] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n_FIND_STRING_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<escape> [\\\\]+ ) (?P<escaped_quote> ['\"] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n_FIND_FSTRING_TOKEN = cython.declare(object, re.compile(r\"\"\"\n (?P<braces> [{]+ | [}]+ ) |\n (?P<escape> [\\\\]+ ) (?P<escaped_quote> ['\"] ) |\n (?P<fstring> f )? (?P<quote> '+ | \"+ )\n\"\"\", re.VERBOSE).search)\n\n\ndef strip_string_literals(code: str, prefix: str = '__Pyx_L'):\n \"\"\"\n Normalizes every string literal to be of the form '__Pyx_Lxxx',\n returning the normalized code and a mapping of labels to\n string literals.\n \"\"\"\n new_code: list = []\n literals: dict = {}\n counter: cython.Py_ssize_t = 0\n find_token = _FIND_TOKEN\n\n def append_new_label(literal):\n nonlocal counter\n counter += 1\n label = f\"{prefix}{counter}_\"\n literals[label] = literal\n new_code.append(label)\n\n def parse_string(quote_type: str, start: cython.Py_ssize_t, is_fstring: cython.bint) -> cython.Py_ssize_t:\n charpos: cython.Py_ssize_t = start\n\n find_token = _FIND_FSTRING_TOKEN if is_fstring else _FIND_STRING_TOKEN\n\n while charpos != -1:\n token = find_token(code, charpos)\n if token is None:\n # This probably indicates an unclosed string literal, i.e. a broken file.\n append_new_label(code[start:])\n charpos = -1\n break\n charpos = token.end()\n\n if token['escape']:\n if len(token['escape']) % 2 == 0 and token['escaped_quote'] == quote_type[0]:\n # Quote is not actually escaped and might be part of a terminator, look at it next.\n charpos -= 1\n\n elif is_fstring and token['braces']:\n # Formats or brace(s) in fstring.\n if len(token['braces']) % 2 == 0:\n # Normal brace characters in string.\n continue\n if token['braces'][-1] == '{':\n if start < charpos-1:\n append_new_label(code[start : charpos-1])\n new_code.append('{')\n start = charpos = parse_code(charpos, in_fstring=True)\n\n elif token['quote'].startswith(quote_type):\n # Closing quote found (potentially together with further, unrelated quotes).\n charpos = token.start('quote')\n if charpos > start:\n append_new_label(code[start : charpos])\n new_code.append(quote_type)\n charpos += len(quote_type)\n break\n\n return charpos\n\n def parse_code(start: cython.Py_ssize_t, in_fstring: cython.bint = False) -> cython.Py_ssize_t:\n charpos: cython.Py_ssize_t = start\n end: cython.Py_ssize_t\n quote: str\n\n while charpos != -1:\n token = find_token(code, charpos)\n if token is None:\n new_code.append(code[start:])\n charpos = -1\n break\n charpos = end = token.end()\n\n if token['quote']:\n quote = token['quote']\n if len(quote) >= 6:\n # Ignore empty tripple-quoted strings: '''''' or \"\"\"\"\"\"\n quote = quote[:len(quote) % 6]\n if quote and len(quote) != 2:\n if len(quote) > 3:\n end -= len(quote) - 3\n quote = quote[:3]\n new_code.append(code[start:end])\n start = charpos = parse_string(quote, end, is_fstring=token['fstring'])\n\n elif token['comment']:\n new_code.append(code[start:end])\n charpos = code.find('\\n', end)\n append_new_label(code[end : charpos if charpos != -1 else None])\n if charpos == -1:\n break # EOF\n start = charpos\n\n elif in_fstring and token['brace']:\n if token['brace'] == '}':\n # Closing '}' of f-string.\n charpos = end = token.start() + 1\n new_code.append(code[start:end]) # with 
'}'\n break\n else:\n # Starting a calculated format modifier inside of an f-string format.\n end = token.start() + 1\n new_code.append(code[start:end]) # with '{'\n start = charpos = parse_code(end, in_fstring=True)\n\n return charpos\n\n parse_code(0)\n return \"\".join(new_code), literals\n\n\n# We need to allow spaces to allow for conditional compilation like\n# IF ...:\n# cimport ...\ndependency_regex = re.compile(r\"(?:^\\s*from +([0-9a-zA-Z_.]+) +cimport)|\"\n r\"(?:^\\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|\"\n r\"(?:^\\s*cdef +extern +from +['\\\"]([^'\\\"]+)['\\\"])|\"\n r\"(?:^\\s*include +['\\\"]([^'\\\"]+)['\\\"])\", re.M)\ndependency_after_from_regex = re.compile(\n r\"(?:^\\s+\\(([0-9a-zA-Z_., ]*)\\)[#\\n])|\"\n r\"(?:^\\s+([0-9a-zA-Z_., ]*)[#\\n])\",\n re.M)\n\n\ndef normalize_existing(base_path, rel_paths):\n return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))\n\n\n@cached_function\ndef normalize_existing0(base_dir, rel_paths):\n \"\"\"\n Given some base directory ``base_dir`` and a list of path names\n ``rel_paths``, normalize each relative path name ``rel`` by\n replacing it by ``os.path.join(base, rel)`` if that file exists.\n\n Return a couple ``(normalized, needed_base)`` where ``normalized``\n if the list of normalized file names and ``needed_base`` is\n ``base_dir`` if we actually needed ``base_dir``. If no paths were\n changed (for example, if all paths were already absolute), then\n ``needed_base`` is ``None``.\n \"\"\"\n normalized = []\n needed_base = None\n for rel in rel_paths:\n if os.path.isabs(rel):\n normalized.append(rel)\n continue\n path = join_path(base_dir, rel)\n if path_exists(path):\n normalized.append(os.path.normpath(path))\n needed_base = base_dir\n else:\n normalized.append(rel)\n return (normalized, needed_base)\n\n\ndef resolve_depends(depends, include_dirs):\n include_dirs = tuple(include_dirs)\n resolved = []\n for depend in depends:\n path = resolve_depend(depend, include_dirs)\n if path is not None:\n resolved.append(path)\n return resolved\n\n\n@cached_function\ndef resolve_depend(depend, include_dirs):\n if depend[0] == '<' and depend[-1] == '>':\n return None\n for dir in include_dirs:\n path = join_path(dir, depend)\n if path_exists(path):\n return os.path.normpath(path)\n return None\n\n\n@cached_function\ndef package(filename):\n dir = os.path.dirname(os.path.abspath(str(filename)))\n if dir != filename and is_package_dir(dir):\n return package(dir) + (os.path.basename(dir),)\n else:\n return ()\n\n\n@cached_function\ndef fully_qualified_name(filename):\n module = os.path.splitext(os.path.basename(filename))[0]\n return '.'.join(package(filename) + (module,))\n\n\n@cached_function\ndef parse_dependencies(source_filename):\n # Actual parsing is way too slow, so we use regular expressions.\n # The only catch is that we must strip comments and string\n # literals ahead of time.\n with Utils.open_source_file(source_filename, error_handling='ignore') as fh:\n source = fh.read()\n distutils_info = DistutilsInfo(source)\n source, literals = strip_string_literals(source)\n source = source.replace('\\\\\\n', ' ').replace('\\t', ' ')\n\n # TODO: pure mode\n cimports = []\n includes = []\n externs = []\n for m in dependency_regex.finditer(source):\n cimport_from, cimport_list, extern, include = m.groups()\n if cimport_from:\n cimports.append(cimport_from)\n m_after_from = dependency_after_from_regex.search(source, pos=m.end())\n if m_after_from:\n multiline, one_line = m_after_from.groups()\n subimports 
= multiline or one_line\n cimports.extend(\"{}.{}\".format(cimport_from, s.strip())\n for s in subimports.split(','))\n\n elif cimport_list:\n cimports.extend(x.strip() for x in cimport_list.split(\",\"))\n elif extern:\n externs.append(literals[extern])\n else:\n includes.append(literals[include])\n return cimports, includes, externs, distutils_info\n\n\nclass DependencyTree:\n\n def __init__(self, context, quiet=False):\n self.context = context\n self.quiet = quiet\n self._transitive_cache = {}\n\n def parse_dependencies(self, source_filename):\n if path_exists(source_filename):\n source_filename = os.path.normpath(source_filename)\n return parse_dependencies(source_filename)\n\n @cached_method\n def included_files(self, filename):\n # This is messy because included files are textually included, resolving\n # cimports (but not includes) relative to the including file.\n all = set()\n for include in self.parse_dependencies(filename)[1]:\n include_path = join_path(os.path.dirname(filename), include)\n if not path_exists(include_path):\n include_path = self.context.find_include_file(include, source_file_path=filename)\n if include_path:\n if '.' + os.path.sep in include_path:\n include_path = os.path.normpath(include_path)\n all.add(include_path)\n all.update(self.included_files(include_path))\n elif not self.quiet:\n print(\"Unable to locate '%s' referenced from '%s'\" % (filename, include))\n return all\n\n @cached_method\n def cimports_externs_incdirs(self, filename):\n # This is really ugly. Nested cimports are resolved with respect to the\n # includer, but includes are resolved with respect to the includee.\n cimports, includes, externs = self.parse_dependencies(filename)[:3]\n cimports = set(cimports)\n externs = set(externs)\n incdirs = set()\n for include in self.included_files(filename):\n included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)\n cimports.update(included_cimports)\n externs.update(included_externs)\n incdirs.update(included_incdirs)\n externs, incdir = normalize_existing(filename, externs)\n if incdir:\n incdirs.add(incdir)\n return tuple(cimports), externs, incdirs\n\n def cimports(self, filename):\n return self.cimports_externs_incdirs(filename)[0]\n\n def package(self, filename):\n return package(filename)\n\n def fully_qualified_name(self, filename):\n return fully_qualified_name(filename)\n\n @cached_method\n def find_pxd(self, module, filename=None):\n is_relative = module[0] == '.'\n if is_relative and not filename:\n raise NotImplementedError(\"New relative imports.\")\n if filename is not None:\n module_path = module.split('.')\n if is_relative:\n module_path.pop(0) # just explicitly relative\n package_path = list(self.package(filename))\n while module_path and not module_path[0]:\n try:\n package_path.pop()\n except IndexError:\n return None # FIXME: error?\n module_path.pop(0)\n relative = '.'.join(package_path + module_path)\n pxd = self.context.find_pxd_file(relative, source_file_path=filename)\n if pxd:\n return pxd\n if is_relative:\n return None # FIXME: error?\n return self.context.find_pxd_file(module, source_file_path=filename)\n\n @cached_method\n def cimported_files(self, filename):\n filename_root, filename_ext = os.path.splitext(filename)\n if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'):\n pxd_list = [filename_root + '.pxd']\n else:\n pxd_list = []\n # Cimports generates all possible combinations package.module\n # when imported as from package cimport module.\n for module in 
self.cimports(filename):\n if module[:7] == 'cython.' or module == 'cython':\n continue\n pxd_file = self.find_pxd(module, filename)\n if pxd_file is not None:\n pxd_list.append(pxd_file)\n return tuple(pxd_list)\n\n @cached_method\n def immediate_dependencies(self, filename):\n all_deps = {filename}\n all_deps.update(self.cimported_files(filename))\n all_deps.update(self.included_files(filename))\n return all_deps\n\n def all_dependencies(self, filename):\n return self.transitive_merge(filename, self.immediate_dependencies, set.union)\n\n @cached_method\n def timestamp(self, filename):\n return os.path.getmtime(filename)\n\n def extract_timestamp(self, filename):\n return self.timestamp(filename), filename\n\n def newest_dependency(self, filename):\n return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])\n\n def distutils_info0(self, filename):\n info = self.parse_dependencies(filename)[3]\n kwds = info.values\n cimports, externs, incdirs = self.cimports_externs_incdirs(filename)\n basedir = os.getcwd()\n # Add dependencies on \"cdef extern from ...\" files\n if externs:\n externs = _make_relative(externs, basedir)\n if 'depends' in kwds:\n kwds['depends'] = list(set(kwds['depends']).union(externs))\n else:\n kwds['depends'] = list(externs)\n # Add include_dirs to ensure that the C compiler will find the\n # \"cdef extern from ...\" files\n if incdirs:\n include_dirs = list(kwds.get('include_dirs', []))\n for inc in _make_relative(incdirs, basedir):\n if inc not in include_dirs:\n include_dirs.append(inc)\n kwds['include_dirs'] = include_dirs\n return info\n\n def distutils_info(self, filename, aliases=None, base=None):\n return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)\n .subs(aliases)\n .merge(base))\n\n def transitive_merge(self, node, extract, merge):\n try:\n seen = self._transitive_cache[extract, merge]\n except KeyError:\n seen = self._transitive_cache[extract, merge] = {}\n return self.transitive_merge_helper(\n node, extract, merge, seen, {}, self.cimported_files)[0]\n\n def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):\n if node in seen:\n return seen[node], None\n deps = extract(node)\n if node in stack:\n return deps, node\n try:\n stack[node] = len(stack)\n loop = None\n for next in outgoing(node):\n sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)\n if sub_loop is not None:\n if loop is not None and stack[loop] < stack[sub_loop]:\n pass\n else:\n loop = sub_loop\n deps = merge(deps, sub_deps)\n if loop == node:\n loop = None\n if loop is None:\n seen[node] = deps\n return deps, loop\n finally:\n del stack[node]\n\n\n_dep_tree = None\n\ndef create_dependency_tree(ctx=None, quiet=False):\n global _dep_tree\n if _dep_tree is None:\n if ctx is None:\n ctx = Context([\".\"], get_directive_defaults(),\n options=CompilationOptions(default_options))\n _dep_tree = DependencyTree(ctx, quiet=quiet)\n return _dep_tree\n\n\n# If this changes, change also docs/src/reference/compilation.rst\n# which mentions this function\ndef default_create_extension(template, kwds):\n if 'depends' in kwds:\n include_dirs = kwds.get('include_dirs', []) + [\".\"]\n depends = resolve_depends(kwds['depends'], include_dirs)\n kwds['depends'] = sorted(set(depends + template.depends))\n\n t = template.__class__\n ext = t(**kwds)\n if hasattr(template, \"py_limited_api\"):\n ext.py_limited_api = template.py_limited_api\n metadata = dict(distutils=kwds, 
module_name=kwds['name'])\n return (ext, metadata)\n\n\n# This may be useful for advanced users?\ndef create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,\n exclude_failures=False):\n if language is not None:\n print('Warning: passing language={0!r} to cythonize() is deprecated. '\n 'Instead, put \"# distutils: language={0}\" in your .pyx or .pxd file(s)'.format(language))\n if exclude is None:\n exclude = []\n if patterns is None:\n return [], {}\n elif isinstance(patterns, str) or not isinstance(patterns, Iterable):\n patterns = [patterns]\n\n from distutils.extension import Extension\n if 'setuptools' in sys.modules:\n # Support setuptools Extension instances as well.\n extension_classes = (\n Extension, # should normally be the same as 'setuptools.extension._Extension'\n sys.modules['setuptools.extension']._Extension,\n sys.modules['setuptools'].Extension,\n )\n else:\n extension_classes = (Extension,)\n\n explicit_modules = {m.name for m in patterns if isinstance(m, extension_classes)}\n deps = create_dependency_tree(ctx, quiet=quiet)\n\n to_exclude = set()\n if not isinstance(exclude, list):\n exclude = [exclude]\n for pattern in exclude:\n to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))\n\n module_list = []\n module_metadata = {}\n\n # if no create_extension() function is defined, use a simple\n # default function.\n create_extension = ctx.options.create_extension or default_create_extension\n\n seen = set()\n for pattern in patterns:\n if isinstance(pattern, str):\n filepattern = pattern\n template = Extension(pattern, []) # Fake Extension without sources\n name = '*'\n base = None\n ext_language = language\n elif isinstance(pattern, extension_classes):\n cython_sources = [s for s in pattern.sources\n if os.path.splitext(s)[1] in ('.py', '.pyx')]\n if cython_sources:\n filepattern = cython_sources[0]\n if len(cython_sources) > 1:\n print(\"Warning: Multiple cython sources found for extension '%s': %s\\n\"\n \"See https://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html \"\n \"for sharing declarations among Cython files.\" % (pattern.name, cython_sources))\n else:\n # ignore non-cython modules\n module_list.append(pattern)\n continue\n template = pattern\n name = template.name\n base = DistutilsInfo(exn=template)\n ext_language = None # do not override whatever the Extension says\n else:\n msg = str(\"pattern is not of type str nor subclass of Extension (%s)\"\n \" but of type %s and class %s\" % (repr(Extension),\n type(pattern),\n pattern.__class__))\n raise TypeError(msg)\n\n for file in nonempty(sorted(extended_iglob(filepattern)), \"'%s' doesn't match any files\" % filepattern):\n if os.path.abspath(file) in to_exclude:\n continue\n module_name = deps.fully_qualified_name(file)\n if '*' in name:\n if module_name in explicit_modules:\n continue\n elif name:\n module_name = name\n\n Utils.raise_error_if_module_name_forbidden(module_name)\n\n if module_name not in seen:\n try:\n kwds = deps.distutils_info(file, aliases, base).values\n except Exception:\n if exclude_failures:\n continue\n raise\n if base is not None:\n for key, value in base.values.items():\n if key not in kwds:\n kwds[key] = value\n\n kwds['name'] = module_name\n\n sources = [file] + [m for m in template.sources if m != filepattern]\n if 'sources' in kwds:\n # allow users to add .c files etc.\n for source in kwds['sources']:\n if source not in sources:\n sources.append(source)\n kwds['sources'] = sources\n\n if ext_language and 
'language' not in kwds:\n kwds['language'] = ext_language\n\n np_pythran = kwds.pop('np_pythran', False)\n\n # Create the new extension\n m, metadata = create_extension(template, kwds)\n m.np_pythran = np_pythran or getattr(m, 'np_pythran', False)\n if m.np_pythran:\n update_pythran_extension(m)\n module_list.append(m)\n\n # Store metadata (this will be written as JSON in the\n # generated C file but otherwise has no purpose)\n module_metadata[module_name] = metadata\n\n if file not in m.sources:\n # Old setuptools unconditionally replaces .pyx with .c/.cpp\n target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c')\n try:\n m.sources.remove(target_file)\n except ValueError:\n # never seen this in the wild, but probably better to warn about this unexpected case\n print(\"Warning: Cython source file not found in sources list, adding %s\" % file)\n m.sources.insert(0, file)\n seen.add(name)\n return module_list, module_metadata\n\n\n# This is the user-exposed entry point.\ndef cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=None, language=None,\n exclude_failures=False, show_all_warnings=False, **options):\n \"\"\"\n Compile a set of source modules into C/C++ files and return a list of distutils\n Extension objects for them.\n\n :param module_list: As module list, pass either a glob pattern, a list of glob\n patterns or a list of Extension objects. The latter\n allows you to configure the extensions separately\n through the normal distutils options.\n You can also pass Extension objects that have\n glob patterns as their sources. Then, cythonize\n will resolve the pattern and create a\n copy of the Extension for every matching file.\n\n :param exclude: When passing glob patterns as ``module_list``, you can exclude certain\n module names explicitly by passing them into the ``exclude`` option.\n\n :param nthreads: The number of concurrent builds for parallel compilation\n (requires the ``multiprocessing`` module).\n\n :param aliases: If you want to use compiler directives like ``# distutils: ...`` but\n can only know at compile time (when running the ``setup.py``) which values\n to use, you can use aliases and pass a dictionary mapping those aliases\n to Python strings when calling :func:`cythonize`. As an example, say you\n want to use the compiler\n directive ``# distutils: include_dirs = ../static_libs/include/``\n but this path isn't always fixed and you want to find it when running\n the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,\n find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python\n variable called ``foo`` as a string, and then call\n ``cythonize(..., aliases={'MY_HEADERS': foo})``.\n\n :param quiet: If True, Cython won't print error, warning, or status messages during the\n compilation.\n\n :param force: Forces the recompilation of the Cython modules, even if the timestamps\n don't indicate that a recompilation is necessary.\n\n :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this\n will be determined at a per-file level based on compiler directives. This\n affects only modules found based on file names. Extension instances passed\n into :func:`cythonize` will not be changed. 
It is recommended to rather\n use the compiler directive ``# distutils: language = c++`` than this option.\n\n :param exclude_failures: For a broad 'try to compile' mode that ignores compilation\n failures and simply excludes the failed extensions,\n pass ``exclude_failures=True``. Note that this only\n really makes sense for compiling ``.py`` files which can also\n be used without compilation.\n\n :param show_all_warnings: By default, not all Cython warnings are printed.\n Set to true to show all warnings.\n\n :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py``\n files compiled. The HTML file gives an indication\n of how much Python interaction there is in\n each of the source code lines, compared to plain C code.\n It also allows you to see the C/C++ code\n generated for each line of Cython code. This report is invaluable when\n optimizing a function for speed,\n and for determining when to :ref:`release the GIL <nogil>`:\n in general, a ``nogil`` block may contain only \"white\" code.\n See examples in :ref:`determining_where_to_add_types` or\n :ref:`primes`.\n\n\n :param annotate-fullc: If ``True`` will produce a colorized HTML version of\n the source which includes entire generated C/C++-code.\n\n\n :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:\n ``compiler_directives={'embedsignature': True}``.\n See :ref:`compiler-directives`.\n\n :param depfile: produce depfiles for the sources if True.\n :param cache: If ``True`` the cache enabled with default path. If the value is a path to a directory,\n then the directory is used to cache generated ``.c``/``.cpp`` files. By default cache is disabled.\n See :ref:`cython-cache`.\n \"\"\"\n if exclude is None:\n exclude = []\n if 'include_path' not in options:\n options['include_path'] = ['.']\n if 'common_utility_include_dir' in options:\n safe_makedirs(options['common_utility_include_dir'])\n\n depfile = options.pop('depfile', None)\n\n if pythran is None:\n pythran_options = None\n else:\n pythran_options = CompilationOptions(**options)\n pythran_options.cplus = True\n pythran_options.np_pythran = True\n\n if force is None:\n force = os.environ.get(\"CYTHON_FORCE_REGEN\") == \"1\" # allow global overrides for build systems\n\n c_options = CompilationOptions(**options)\n cpp_options = CompilationOptions(**options); cpp_options.cplus = True\n ctx = Context.from_options(c_options)\n options = c_options\n module_list, module_metadata = create_extension_list(\n module_list,\n exclude=exclude,\n ctx=ctx,\n quiet=quiet,\n exclude_failures=exclude_failures,\n language=language,\n aliases=aliases)\n\n fix_windows_unicode_modules(module_list)\n\n deps = create_dependency_tree(ctx, quiet=quiet)\n build_dir = getattr(options, 'build_dir', None)\n if options.cache:\n # cache is enabled when:\n # * options.cache is True (the default path to the cache base dir is used)\n # * options.cache is the explicit path to the cache base dir\n cache_path = None if options.cache is True else options.cache\n cache = Cache(cache_path, getattr(options, 'cache_size', None))\n else:\n cache = None\n\n def copy_to_build_dir(filepath, root=os.getcwd()):\n filepath_abs = os.path.abspath(filepath)\n if os.path.isabs(filepath):\n filepath = filepath_abs\n if filepath_abs.startswith(root):\n # distutil extension depends are relative to cwd\n mod_dir = join_path(build_dir,\n os.path.dirname(_relpath(filepath, root)))\n copy_once_if_newer(filepath_abs, mod_dir)\n\n modules_by_cfile = 
collections.defaultdict(list)\n to_compile = []\n for m in module_list:\n if build_dir:\n for dep in m.depends:\n copy_to_build_dir(dep)\n\n cy_sources = [\n source for source in m.sources\n if os.path.splitext(source)[1] in ('.pyx', '.py')]\n if len(cy_sources) == 1:\n # normal \"special\" case: believe the Extension module name to allow user overrides\n full_module_name = m.name\n else:\n # infer FQMN from source files\n full_module_name = None\n\n new_sources = []\n for source in m.sources:\n base, ext = os.path.splitext(source)\n if ext in ('.pyx', '.py'):\n if m.np_pythran:\n c_file = base + '.cpp'\n options = pythran_options\n elif m.language == 'c++':\n c_file = base + '.cpp'\n options = cpp_options\n else:\n c_file = base + '.c'\n options = c_options\n\n # setup for out of place build directory if enabled\n if build_dir:\n if os.path.isabs(c_file):\n c_file = os.path.splitdrive(c_file)[1]\n c_file = c_file.split(os.sep, 1)[1]\n c_file = os.path.join(build_dir, c_file)\n dir = os.path.dirname(c_file)\n safe_makedirs_once(dir)\n\n # write out the depfile, if requested\n if depfile:\n dependencies = deps.all_dependencies(source)\n write_depfile(c_file, source, dependencies)\n\n # Missing files and those generated by other Cython versions should always be recreated.\n if Utils.file_generated_by_this_cython(c_file):\n c_timestamp = os.path.getmtime(c_file)\n else:\n c_timestamp = -1\n\n # Priority goes first to modified files, second to direct\n # dependents, and finally to indirect dependents.\n if c_timestamp < deps.timestamp(source):\n dep_timestamp, dep = deps.timestamp(source), source\n priority = 0\n else:\n dep_timestamp, dep = deps.newest_dependency(source)\n priority = 2 - (dep in deps.immediate_dependencies(source))\n if force or c_timestamp < dep_timestamp:\n if not quiet and not force:\n if source == dep:\n print(\"Compiling %s because it changed.\" % Utils.decode_filename(source))\n else:\n print(\"Compiling %s because it depends on %s.\" % (\n Utils.decode_filename(source),\n Utils.decode_filename(dep),\n ))\n if not force and cache:\n fingerprint = cache.transitive_fingerprint(\n source, deps.all_dependencies(source), options,\n FingerprintFlags(\n m.language or 'c',\n getattr(m, 'py_limited_api', False),\n getattr(m, 'np_pythran', False)\n )\n )\n else:\n fingerprint = None\n to_compile.append((\n priority, source, c_file, fingerprint, cache, quiet,\n options, not exclude_failures, module_metadata.get(m.name),\n full_module_name, show_all_warnings))\n new_sources.append(c_file)\n modules_by_cfile[c_file].append(m)\n else:\n new_sources.append(source)\n if build_dir:\n copy_to_build_dir(source)\n m.sources = new_sources\n\n to_compile.sort()\n # Drop \"priority\" component of \"to_compile\" entries and add a\n # simple progress indicator.\n N = len(to_compile)\n progress_fmt = \"[{0:%d}/{1}] \" % len(str(N))\n for i in range(N):\n progress = progress_fmt.format(i+1, N)\n to_compile[i] = to_compile[i][1:] + (progress,)\n\n if N <= 1:\n nthreads = 0\n if nthreads:\n import multiprocessing\n pool = multiprocessing.Pool(\n nthreads, initializer=_init_multiprocessing_helper)\n # This is a bit more involved than it should be, because KeyboardInterrupts\n # break the multiprocessing workers when using a normal pool.map().\n # See, for example:\n # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt\n try:\n result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)\n pool.close()\n while not result.ready():\n try:\n result.get(99999) # seconds\n except 
multiprocessing.TimeoutError:\n pass\n except KeyboardInterrupt:\n pool.terminate()\n raise\n pool.join()\n else:\n for args in to_compile:\n cythonize_one(*args)\n\n if exclude_failures:\n failed_modules = set()\n for c_file, modules in modules_by_cfile.items():\n if not os.path.exists(c_file):\n failed_modules.update(modules)\n elif os.path.getsize(c_file) < 200:\n f = open(c_file, 'r', encoding='iso8859-1')\n try:\n if f.read(len('#error ')) == '#error ':\n # dead compilation result\n failed_modules.update(modules)\n finally:\n f.close()\n if failed_modules:\n for module in failed_modules:\n module_list.remove(module)\n print(\"Failed compilations: %s\" % ', '.join(sorted([\n module.name for module in failed_modules])))\n\n if cache:\n cache.cleanup_cache()\n\n # cythonize() is often followed by the (non-Python-buffered)\n # compiler output, flush now to avoid interleaving output.\n sys.stdout.flush()\n return module_list\n\n\ndef fix_windows_unicode_modules(module_list):\n # Hack around a distutils 3.[5678] bug on Windows for unicode module names.\n # https://bugs.python.org/issue39432\n if sys.platform != \"win32\":\n return\n if sys.version_info >= (3, 8, 2):\n return\n\n def make_filtered_list(ignored_symbol, old_entries):\n class FilteredExportSymbols(list):\n # export_symbols for unicode filename cause link errors on Windows\n # Cython doesn't need them (it already defines PyInit with the correct linkage)\n # so use this class as a temporary fix to stop them from being generated\n def __contains__(self, val):\n # so distutils doesn't \"helpfully\" add PyInit_<name>\n return val == ignored_symbol or list.__contains__(self, val)\n\n filtered_list = FilteredExportSymbols(old_entries)\n if old_entries:\n filtered_list.extend(name for name in old_entries if name != ignored_symbol)\n return filtered_list\n\n for m in module_list:\n if m.name.isascii():\n continue\n m.export_symbols = make_filtered_list(\n \"PyInit_\" + m.name.rsplit(\".\", 1)[-1],\n m.export_symbols,\n )\n\n\nif os.environ.get('XML_RESULTS'):\n compile_result_dir = os.environ['XML_RESULTS']\n def record_results(func):\n def with_record(*args):\n t = time.time()\n success = True\n try:\n try:\n func(*args)\n except:\n success = False\n finally:\n t = time.time() - t\n module = fully_qualified_name(args[0])\n name = \"cythonize.\" + module\n failures = 1 - success\n if success:\n failure_item = \"\"\n else:\n failure_item = \"failure\"\n output = open(os.path.join(compile_result_dir, name + \".xml\"), \"w\")\n output.write(\"\"\"\n <?xml version=\"1.0\" ?>\n <testsuite name=\"%(name)s\" errors=\"0\" failures=\"%(failures)s\" tests=\"1\" time=\"%(t)s\">\n <testcase classname=\"%(name)s\" name=\"cythonize\">\n %(failure_item)s\n </testcase>\n </testsuite>\n \"\"\".strip() % locals())\n output.close()\n return with_record\nelse:\n def record_results(func):\n return func\n\n\n# TODO: Share context? 
Issue: pyx processing leaks into pxd module\n@record_results\ndef cythonize_one(pyx_file, c_file, fingerprint, cache, quiet, options=None,\n raise_on_failure=True, embedded_metadata=None,\n full_module_name=None, show_all_warnings=False,\n progress=\"\"):\n from ..Compiler.Main import compile_single, default_options\n from ..Compiler.Errors import CompileError, PyrexError\n\n if cache and fingerprint:\n cached = cache.lookup_cache(c_file, fingerprint)\n if cached:\n if not quiet:\n print(\"%sFound compiled %s in cache\" % (progress, pyx_file))\n cache.load_from_cache(c_file, cached)\n return\n if not quiet:\n print(\"%sCythonizing %s\" % (progress, Utils.decode_filename(pyx_file)))\n if options is None:\n options = CompilationOptions(default_options)\n options.output_file = c_file\n options.embedded_metadata = embedded_metadata\n\n old_warning_level = Errors.LEVEL\n if show_all_warnings:\n Errors.LEVEL = 0\n\n any_failures = 0\n try:\n result = compile_single(pyx_file, options, full_module_name=full_module_name)\n if result.num_errors > 0:\n any_failures = 1\n except (OSError, PyrexError) as e:\n sys.stderr.write('%s\\n' % e)\n any_failures = 1\n # XXX\n import traceback\n traceback.print_exc()\n except Exception:\n if raise_on_failure:\n raise\n import traceback\n traceback.print_exc()\n any_failures = 1\n finally:\n if show_all_warnings:\n Errors.LEVEL = old_warning_level\n\n if any_failures:\n if raise_on_failure:\n raise CompileError(None, pyx_file)\n elif os.path.exists(c_file):\n os.remove(c_file)\n elif cache and fingerprint:\n cache.store_to_cache(c_file, fingerprint, result)\n\n\ndef cythonize_one_helper(m):\n import traceback\n try:\n return cythonize_one(*m)\n except Exception:\n traceback.print_exc()\n raise\n\n\ndef _init_multiprocessing_helper():\n # KeyboardInterrupt kills workers, so don't let them get it\n import signal\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n", "path": "Cython/Build/Dependencies.py" } ]
diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py index d75a103b25f..a791fc4b2a1 100644 --- a/Cython/Build/Dependencies.py +++ b/Cython/Build/Dependencies.py @@ -705,6 +705,8 @@ def default_create_extension(template, kwds): t = template.__class__ ext = t(**kwds) + if hasattr(template, "py_limited_api"): + ext.py_limited_api = template.py_limited_api metadata = dict(distutils=kwds, module_name=kwds['name']) return (ext, metadata) diff --git a/docs/src/userguide/source_files_and_compilation.rst b/docs/src/userguide/source_files_and_compilation.rst index 9241bbf0a8c..64e8f904f36 100644 --- a/docs/src/userguide/source_files_and_compilation.rst +++ b/docs/src/userguide/source_files_and_compilation.rst @@ -276,6 +276,8 @@ The default function (defined in ``Cython.Build.Dependencies``) is:: t = template.__class__ ext = t(**kwds) + if hasattr(template, "py_limited_api"): + ext.py_limited_api = template.py_limited_api metadata = dict(distutils=kwds, module_name=kwds['name']) return ext, metadata
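Editor's note (illustrative, not part of the record above): the Dependencies.py patch copies py_limited_api from the template Extension onto the Extension that default_create_extension() rebuilds, so a limited-API request made in setup.py survives cythonize(). A minimal sketch of such a setup.py, assuming setuptools and Cython are installed; "mypkg" and the source path are placeholders:

```python
# Hypothetical setup.py; package and module names are made up for illustration.
from setuptools import Extension, setup
from Cython.Build import cythonize

ext_modules = [
    Extension(
        "mypkg.fast",
        sources=["mypkg/fast.pyx"],
        py_limited_api=True,  # request the CPython limited API / stable ABI
    ),
]

setup(
    name="mypkg",
    # cythonize() re-creates each Extension via create_extension();
    # with the patch above, the rebuilt object keeps py_limited_api=True.
    ext_modules=cythonize(ext_modules),
)
```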
ipython__ipython-2280
SSH passwordless check with OpenSSH checks for the wrong thing
The pattern passed to pexpect is '[Ppassword]:', which looks for any of those letters followed by ':', and not, as the intention seems to be, for 'Password:'. The correct pattern is '[Pp]assword:'. This is at IPython/external/ssh/tunnel.py:100.
[ { "content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os,sys, atexit\nimport socket\nfrom multiprocessing import Process\nfrom getpass import getpass, getuser\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DeprecationWarning)\n import paramiko\nexcept ImportError:\n paramiko = None\nelse:\n from forward import forward_tunnel\n\ntry:\n from IPython.external import pexpect\nexcept ImportError:\n pexpect = None\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n# select_random_ports copied from IPython.parallel.util\n_random_ports = set()\n\ndef select_random_ports(n):\n \"\"\"Selects and return n random ports that are available.\"\"\"\n ports = []\n for i in xrange(n):\n sock = socket.socket()\n sock.bind(('', 0))\n while sock.getsockname()[1] in _random_ports:\n sock.close()\n sock = socket.socket()\n sock.bind(('', 0))\n ports.append(sock)\n for i, sock in enumerate(ports):\n port = sock.getsockname()[1]\n sock.close()\n ports[i] = port\n _random_ports.add(port)\n return ports\n\n\n#-----------------------------------------------------------------------------\n# Check for passwordless login\n#-----------------------------------------------------------------------------\n\ndef try_passwordless_ssh(server, keyfile, paramiko=None):\n \"\"\"Attempt to make an ssh connection without a password.\n This is mainly used for requiring password input only once\n when many tunnels may be connected to the same server.\n\n If paramiko is None, the default for the platform is chosen.\n \"\"\"\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if not paramiko:\n f = _try_passwordless_openssh\n else:\n f = _try_passwordless_paramiko\n return f(server, keyfile)\n\ndef _try_passwordless_openssh(server, keyfile):\n \"\"\"Try passwordless login with shell ssh command.\"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko\")\n cmd = 'ssh -f '+ server\n if keyfile:\n cmd += ' -i ' + keyfile\n cmd += ' exit'\n p = pexpect.spawn(cmd)\n while True:\n try:\n p.expect('[Ppassword]:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n return True\n else:\n return False\n\ndef _try_passwordless_paramiko(server, keyfile):\n \"\"\"Try passwordless login with paramiko.\"\"\"\n if paramiko is None:\n msg = \"Paramiko unavaliable, \"\n if sys.platform == 'win32':\n msg += \"Paramiko is required for ssh tunneled connections on Windows.\"\n else:\n msg += \"use OpenSSH.\"\n raise ImportError(msg)\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n try:\n client.connect(server, port, username=username, 
key_filename=keyfile,\n look_for_keys=True)\n except paramiko.AuthenticationException:\n return False\n else:\n client.close()\n return True\n\n\ndef tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Connect a socket to an address via an ssh tunnel.\n\n This is a wrapper for socket.connect(addr), when addr is not accessible\n from the local machine. It simply creates an ssh tunnel using the remaining args,\n and calls socket.connect('tcp://localhost:lport') where lport is the randomly\n selected local port of the tunnel.\n\n \"\"\"\n new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)\n socket.connect(new_url)\n return tunnel\n\n\ndef open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Open a tunneled connection from a 0MQ url.\n\n For use inside tunnel_connection.\n\n Returns\n -------\n\n (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object\n \"\"\"\n\n lport = select_random_ports(1)[0]\n transport, addr = addr.split('://')\n ip,rport = addr.split(':')\n rport = int(rport)\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if paramiko:\n tunnelf = paramiko_tunnel\n else:\n tunnelf = openssh_tunnel\n\n tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)\n return 'tcp://127.0.0.1:%i'%lport, tunnel\n\ndef openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"Create an ssh tunnel using command-line ssh that connects port lport\n on this machine to localhost:rport on server. The tunnel\n will automatically close when not in use, remaining open\n for a minimum of timeout seconds for an initial connection.\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n \"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko_tunnel\")\n ssh=\"ssh \"\n if keyfile:\n ssh += \"-i \" + keyfile\n \n if ':' in server:\n server, port = server.split(':')\n ssh += \" -p %s\" % port\n \n cmd = \"%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i\" % (\n ssh, lport, remoteip, rport, server, timeout)\n tunnel = pexpect.spawn(cmd)\n failed = False\n while True:\n try:\n tunnel.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n if tunnel.exitstatus:\n print (tunnel.exitstatus)\n print (tunnel.before)\n print (tunnel.after)\n raise RuntimeError(\"tunnel '%s' failed to start\"%(cmd))\n else:\n return tunnel.pid\n else:\n if failed:\n print(\"Password rejected, try again\")\n password=None\n if password is None:\n password = getpass(\"%s's password: \"%(server))\n tunnel.sendline(password)\n failed = True\n\ndef _split_server(server):\n if '@' in server:\n username,server = server.split('@', 1)\n else:\n username = getuser()\n if ':' in server:\n server, port = server.split(':')\n port = int(port)\n else:\n port = 22\n return username, server, port\n\ndef paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"launch a tunner with paramiko in a subprocess. This should only be used\n when shell ssh is unavailable (e.g. Windows).\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n If you are familiar with ssh tunnels, this creates the tunnel:\n\n ssh server -L localhost:lport:remoteip:rport\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n\n \"\"\"\n if paramiko is None:\n raise ImportError(\"Paramiko not available\")\n\n if password is None:\n if not _try_passwordless_paramiko(server, keyfile):\n password = getpass(\"%s's password: \"%(server))\n\n p = Process(target=_paramiko_tunnel,\n args=(lport, rport, server, remoteip),\n kwargs=dict(keyfile=keyfile, password=password))\n p.daemon=False\n p.start()\n atexit.register(_shutdown_process, p)\n return p\n\ndef _shutdown_process(p):\n if p.is_alive():\n p.terminate()\n\ndef _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):\n \"\"\"Function for actually starting a paramiko tunnel, to be passed\n to multiprocessing.Process(target=this), and not called directly.\n \"\"\"\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True, password=password)\n# except paramiko.AuthenticationException:\n# if password is None:\n# password = getpass(\"%s@%s's password: \"%(username, server))\n# client.connect(server, port, username=username, password=password)\n# else:\n# raise\n except Exception as e:\n print ('*** Failed to connect to %s:%d: %r' % (server, port, e))\n sys.exit(1)\n\n # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))\n\n try:\n forward_tunnel(lport, remoteip, rport, client.get_transport())\n except KeyboardInterrupt:\n print ('SIGINT: Port forwarding stopped cleanly')\n sys.exit(0)\n except Exception as e:\n print (\"Port forwarding stopped uncleanly: %s\"%e)\n sys.exit(255)\n\nif sys.platform == 'win32':\n ssh_tunnel = paramiko_tunnel\nelse:\n ssh_tunnel = openssh_tunnel\n\n\n__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']\n\n\n", "path": "IPython/external/ssh/tunnel.py" } ]
[ { "content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os,sys, atexit\nimport socket\nfrom multiprocessing import Process\nfrom getpass import getpass, getuser\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DeprecationWarning)\n import paramiko\nexcept ImportError:\n paramiko = None\nelse:\n from forward import forward_tunnel\n\ntry:\n from IPython.external import pexpect\nexcept ImportError:\n pexpect = None\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n# select_random_ports copied from IPython.parallel.util\n_random_ports = set()\n\ndef select_random_ports(n):\n \"\"\"Selects and return n random ports that are available.\"\"\"\n ports = []\n for i in xrange(n):\n sock = socket.socket()\n sock.bind(('', 0))\n while sock.getsockname()[1] in _random_ports:\n sock.close()\n sock = socket.socket()\n sock.bind(('', 0))\n ports.append(sock)\n for i, sock in enumerate(ports):\n port = sock.getsockname()[1]\n sock.close()\n ports[i] = port\n _random_ports.add(port)\n return ports\n\n\n#-----------------------------------------------------------------------------\n# Check for passwordless login\n#-----------------------------------------------------------------------------\n\ndef try_passwordless_ssh(server, keyfile, paramiko=None):\n \"\"\"Attempt to make an ssh connection without a password.\n This is mainly used for requiring password input only once\n when many tunnels may be connected to the same server.\n\n If paramiko is None, the default for the platform is chosen.\n \"\"\"\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if not paramiko:\n f = _try_passwordless_openssh\n else:\n f = _try_passwordless_paramiko\n return f(server, keyfile)\n\ndef _try_passwordless_openssh(server, keyfile):\n \"\"\"Try passwordless login with shell ssh command.\"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko\")\n cmd = 'ssh -f '+ server\n if keyfile:\n cmd += ' -i ' + keyfile\n cmd += ' exit'\n p = pexpect.spawn(cmd)\n while True:\n try:\n p.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n return True\n else:\n return False\n\ndef _try_passwordless_paramiko(server, keyfile):\n \"\"\"Try passwordless login with paramiko.\"\"\"\n if paramiko is None:\n msg = \"Paramiko unavaliable, \"\n if sys.platform == 'win32':\n msg += \"Paramiko is required for ssh tunneled connections on Windows.\"\n else:\n msg += \"use OpenSSH.\"\n raise ImportError(msg)\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n try:\n client.connect(server, port, username=username, 
key_filename=keyfile,\n look_for_keys=True)\n except paramiko.AuthenticationException:\n return False\n else:\n client.close()\n return True\n\n\ndef tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Connect a socket to an address via an ssh tunnel.\n\n This is a wrapper for socket.connect(addr), when addr is not accessible\n from the local machine. It simply creates an ssh tunnel using the remaining args,\n and calls socket.connect('tcp://localhost:lport') where lport is the randomly\n selected local port of the tunnel.\n\n \"\"\"\n new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)\n socket.connect(new_url)\n return tunnel\n\n\ndef open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Open a tunneled connection from a 0MQ url.\n\n For use inside tunnel_connection.\n\n Returns\n -------\n\n (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object\n \"\"\"\n\n lport = select_random_ports(1)[0]\n transport, addr = addr.split('://')\n ip,rport = addr.split(':')\n rport = int(rport)\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if paramiko:\n tunnelf = paramiko_tunnel\n else:\n tunnelf = openssh_tunnel\n\n tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)\n return 'tcp://127.0.0.1:%i'%lport, tunnel\n\ndef openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"Create an ssh tunnel using command-line ssh that connects port lport\n on this machine to localhost:rport on server. The tunnel\n will automatically close when not in use, remaining open\n for a minimum of timeout seconds for an initial connection.\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n \"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko_tunnel\")\n ssh=\"ssh \"\n if keyfile:\n ssh += \"-i \" + keyfile\n \n if ':' in server:\n server, port = server.split(':')\n ssh += \" -p %s\" % port\n \n cmd = \"%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i\" % (\n ssh, lport, remoteip, rport, server, timeout)\n tunnel = pexpect.spawn(cmd)\n failed = False\n while True:\n try:\n tunnel.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n if tunnel.exitstatus:\n print (tunnel.exitstatus)\n print (tunnel.before)\n print (tunnel.after)\n raise RuntimeError(\"tunnel '%s' failed to start\"%(cmd))\n else:\n return tunnel.pid\n else:\n if failed:\n print(\"Password rejected, try again\")\n password=None\n if password is None:\n password = getpass(\"%s's password: \"%(server))\n tunnel.sendline(password)\n failed = True\n\ndef _split_server(server):\n if '@' in server:\n username,server = server.split('@', 1)\n else:\n username = getuser()\n if ':' in server:\n server, port = server.split(':')\n port = int(port)\n else:\n port = 22\n return username, server, port\n\ndef paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"launch a tunner with paramiko in a subprocess. This should only be used\n when shell ssh is unavailable (e.g. Windows).\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n If you are familiar with ssh tunnels, this creates the tunnel:\n\n ssh server -L localhost:lport:remoteip:rport\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n\n \"\"\"\n if paramiko is None:\n raise ImportError(\"Paramiko not available\")\n\n if password is None:\n if not _try_passwordless_paramiko(server, keyfile):\n password = getpass(\"%s's password: \"%(server))\n\n p = Process(target=_paramiko_tunnel,\n args=(lport, rport, server, remoteip),\n kwargs=dict(keyfile=keyfile, password=password))\n p.daemon=False\n p.start()\n atexit.register(_shutdown_process, p)\n return p\n\ndef _shutdown_process(p):\n if p.is_alive():\n p.terminate()\n\ndef _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):\n \"\"\"Function for actually starting a paramiko tunnel, to be passed\n to multiprocessing.Process(target=this), and not called directly.\n \"\"\"\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True, password=password)\n# except paramiko.AuthenticationException:\n# if password is None:\n# password = getpass(\"%s@%s's password: \"%(username, server))\n# client.connect(server, port, username=username, password=password)\n# else:\n# raise\n except Exception as e:\n print ('*** Failed to connect to %s:%d: %r' % (server, port, e))\n sys.exit(1)\n\n # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))\n\n try:\n forward_tunnel(lport, remoteip, rport, client.get_transport())\n except KeyboardInterrupt:\n print ('SIGINT: Port forwarding stopped cleanly')\n sys.exit(0)\n except Exception as e:\n print (\"Port forwarding stopped uncleanly: %s\"%e)\n sys.exit(255)\n\nif sys.platform == 'win32':\n ssh_tunnel = paramiko_tunnel\nelse:\n ssh_tunnel = openssh_tunnel\n\n\n__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']\n\n\n", "path": "IPython/external/ssh/tunnel.py" } ]
diff --git a/IPython/external/ssh/tunnel.py b/IPython/external/ssh/tunnel.py index 4fce68c008b..9ae2311fa69 100644 --- a/IPython/external/ssh/tunnel.py +++ b/IPython/external/ssh/tunnel.py @@ -97,7 +97,7 @@ def _try_passwordless_openssh(server, keyfile): p = pexpect.spawn(cmd) while True: try: - p.expect('[Ppassword]:', timeout=.1) + p.expect('[Pp]assword:', timeout=.1) except pexpect.TIMEOUT: continue except pexpect.EOF:
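The one-line fix in this diff turns on the fact that `pexpect.spawn.expect` treats a string pattern as a regular expression: `[Ppassword]:` is a character class (any single character from the set P p a s w o r d, followed by a colon), not the literal prompt, so it can fire on unrelated output and never anchors on the word itself. A small illustration with plain `re`; the sample strings below are made up for the example, not taken from the patch:

```python
import re

buggy = re.compile(r'[Ppassword]:')   # character class: one of P,p,a,s,w,o,r,d then ':'
fixed = re.compile(r'[Pp]assword:')   # literal 'assword:' preceded by 'P' or 'p'

for line in ["user@host's password:", "unknown keyword: foo"]:
    print(line, '| buggy:', bool(buggy.search(line)), '| fixed:', bool(fixed.search(line)))

# "user@host's password:" -> buggy True (it only matched the trailing "d:"), fixed True
# "unknown keyword: foo"  -> buggy True (again just "d:"),                   fixed False
```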
google__mobly-578
yaml.load_all is unsafe with default loader tests/mobly/output_test.py::OutputTest::test_teardown_class_output T:\src\github\mobly\tests\mobly\output_test.py:258: YAMLLoadWarning: *** Calling yaml.load_all() without Loader=... is deprecated. *** The default Loader is unsafe. *** Please read https://msg.pyyaml.org/load for full details. for entry in yaml.load_all(f): tests/mobly/records_test.py::RecordsTest::test_summary_user_data T:\src\github\mobly\tests\mobly\records_test.py:401: YAMLLoadWarning: *** Calling yaml.load_all() without Loader=... is deprecated. *** The default Loader is unsafe. *** Please read https://msg.pyyaml.org/load for full details. for c in yaml.load_all(f): tests/mobly/records_test.py::RecordsTest::test_summary_write_dump T:\src\github\mobly\tests\mobly\records_test.py:364: YAMLLoadWarning: *** Calling yaml.load() without Loader=... is deprecated. *** The default Loader is unsafe. *** Please read https://msg.pyyaml.org/load for full details. content = yaml.load(f) tests/mobly/records_test.py::RecordsTest::test_summary_write_dump_with_unicode T:\src\github\mobly\tests\mobly\records_test.py:383: YAMLLoadWarning: *** Calling yaml.load() without Loader=... is deprecated. *** The default Loader is unsafe. *** Please read https://msg.pyyaml.org/load for full details. content = yaml.load(f) tests/mobly/test_runner_test.py::TestRunnerTest::test_summary_file_entries T:\src\github\mobly\tests\mobly\test_runner_test.py:135: YAMLLoadWarning: *** Calling yaml.load_all() without Loader=... is deprecated. *** The default Loader is unsafe. *** Please read https://msg.pyyaml.org/load for full details. summary_entries = list(yaml.load_all(f))
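The warnings above all point at the same migration: either switch to the `safe_*` variants or pass an explicit `Loader`. A minimal sketch of both spellings (the file names here are placeholders, not paths from the test suite):

```python
import io
import yaml

# Preferred: the safe loader only constructs plain Python objects (dicts, lists, scalars).
with io.open('summary.yaml', 'r', encoding='utf-8') as f:
    entries = list(yaml.safe_load_all(f))

# Equivalent explicit form for a single-document load.
with io.open('config.yaml', 'r', encoding='utf-8') as f:
    conf = yaml.load(f, Loader=yaml.SafeLoader)
```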
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import str\n\nimport copy\nimport io\nimport pprint\nimport os\nimport yaml\n\nfrom mobly import keys\nfrom mobly import utils\n\n# An environment variable defining the base location for Mobly logs.\nENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'\n_DEFAULT_LOG_PATH = '/tmp/logs/mobly/'\n\n\nclass MoblyConfigError(Exception):\n \"\"\"Raised when there is a problem in test configuration file.\"\"\"\n\n\ndef _validate_test_config(test_config):\n \"\"\"Validates the raw configuration loaded from the config file.\n\n Making sure the required key 'TestBeds' is present.\n \"\"\"\n required_key = keys.Config.key_testbed.value\n if required_key not in test_config:\n raise MoblyConfigError(\n 'Required key %s missing in test config.' % required_key)\n\n\ndef _validate_testbed_name(name):\n \"\"\"Validates the name of a test bed.\n\n Since test bed names are used as part of the test run id, it needs to meet\n certain requirements.\n\n Args:\n name: The test bed's name specified in config file.\n\n Raises:\n MoblyConfigError: The name does not meet any criteria.\n \"\"\"\n if not name:\n raise MoblyConfigError(\"Test bed names can't be empty.\")\n name = str(name)\n for char in name:\n if char not in utils.valid_filename_chars:\n raise MoblyConfigError(\n 'Char \"%s\" is not allowed in test bed names.' % char)\n\n\ndef _validate_testbed_configs(testbed_configs):\n \"\"\"Validates the testbed configurations.\n\n Args:\n testbed_configs: A list of testbed configuration dicts.\n\n Raises:\n MoblyConfigError: Some parts of the configuration is invalid.\n \"\"\"\n seen_names = set()\n # Cross checks testbed configs for resource conflicts.\n for config in testbed_configs:\n # Check for conflicts between multiple concurrent testbed configs.\n # No need to call it if there's only one testbed config.\n name = config[keys.Config.key_testbed_name.value]\n _validate_testbed_name(name)\n # Test bed names should be unique.\n if name in seen_names:\n raise MoblyConfigError('Duplicate testbed name %s found.' % name)\n seen_names.add(name)\n\n\ndef load_test_config_file(test_config_path, tb_filters=None):\n \"\"\"Processes the test configuration file provied by user.\n\n Loads the configuration file into a dict, unpacks each testbed\n config into its own dict, and validate the configuration in the\n process.\n\n Args:\n test_config_path: Path to the test configuration file.\n tb_filters: A subset of test bed names to be pulled from the config\n file. If None, then all test beds will be selected.\n\n Returns:\n A list of test configuration dicts to be passed to\n test_runner.TestRunner.\n \"\"\"\n configs = _load_config_file(test_config_path)\n if tb_filters:\n tbs = []\n for tb in configs[keys.Config.key_testbed.value]:\n if tb[keys.Config.key_testbed_name.value] in tb_filters:\n tbs.append(tb)\n if len(tbs) != len(tb_filters):\n raise MoblyConfigError(\n 'Expect to find %d test bed configs, found %d. 
Check if'\n ' you have the correct test bed names.' % (len(tb_filters),\n len(tbs)))\n configs[keys.Config.key_testbed.value] = tbs\n mobly_params = configs.get(keys.Config.key_mobly_params.value, {})\n # Decide log path.\n log_path = mobly_params.get(keys.Config.key_log_path.value,\n _DEFAULT_LOG_PATH)\n if ENV_MOBLY_LOGPATH in os.environ:\n log_path = os.environ[ENV_MOBLY_LOGPATH]\n log_path = utils.abs_path(log_path)\n # Validate configs\n _validate_test_config(configs)\n _validate_testbed_configs(configs[keys.Config.key_testbed.value])\n # Transform config dict from user-facing key mapping to internal config object.\n test_configs = []\n for original_bed_config in configs[keys.Config.key_testbed.value]:\n test_run_config = TestRunConfig()\n test_run_config.test_bed_name = original_bed_config[\n keys.Config.key_testbed_name.value]\n test_run_config.log_path = log_path\n test_run_config.controller_configs = original_bed_config.get(\n keys.Config.key_testbed_controllers.value, {})\n test_run_config.user_params = original_bed_config.get(\n keys.Config.key_testbed_test_params.value, {})\n test_configs.append(test_run_config)\n return test_configs\n\n\ndef _load_config_file(path):\n \"\"\"Loads a test config file.\n\n The test config file has to be in YAML format.\n\n Args:\n path: A string that is the full path to the config file, including the\n file name.\n\n Returns:\n A dict that represents info in the config file.\n \"\"\"\n with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n conf = yaml.load(f)\n return conf\n\n\nclass TestRunConfig(object):\n \"\"\"The data class that holds all the information needed for a test run.\n\n Attributes:\n log_path: string, specifies the root directory for all logs written by\n a test run.\n test_bed_name: string, the name of the test bed used by a test run.\n controller_configs: dict, configs used for instantiating controller\n objects.\n user_params: dict, all the parameters to be consumed by the test logic.\n summary_writer: records.TestSummaryWriter, used to write elements to\n the test result summary file.\n test_class_name_suffix: string, suffix to append to the class name for\n reporting. This is used for differentiating the same class\n executed with different parameters in a suite.\n \"\"\"\n\n def __init__(self):\n self.log_path = None\n self.test_bed_name = None\n self.controller_configs = None\n self.user_params = None\n self.summary_writer = None\n self.test_class_name_suffix = None\n\n def copy(self):\n \"\"\"Returns a deep copy of the current config.\n \"\"\"\n return copy.deepcopy(self)\n\n def __str__(self):\n content = dict(self.__dict__)\n content.pop('summary_writer')\n return pprint.pformat(content)\n", "path": "mobly/config_parser.py" } ]
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import str\n\nimport copy\nimport io\nimport pprint\nimport os\nimport yaml\n\nfrom mobly import keys\nfrom mobly import utils\n\n# An environment variable defining the base location for Mobly logs.\nENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'\n_DEFAULT_LOG_PATH = '/tmp/logs/mobly/'\n\n\nclass MoblyConfigError(Exception):\n \"\"\"Raised when there is a problem in test configuration file.\"\"\"\n\n\ndef _validate_test_config(test_config):\n \"\"\"Validates the raw configuration loaded from the config file.\n\n Making sure the required key 'TestBeds' is present.\n \"\"\"\n required_key = keys.Config.key_testbed.value\n if required_key not in test_config:\n raise MoblyConfigError(\n 'Required key %s missing in test config.' % required_key)\n\n\ndef _validate_testbed_name(name):\n \"\"\"Validates the name of a test bed.\n\n Since test bed names are used as part of the test run id, it needs to meet\n certain requirements.\n\n Args:\n name: The test bed's name specified in config file.\n\n Raises:\n MoblyConfigError: The name does not meet any criteria.\n \"\"\"\n if not name:\n raise MoblyConfigError(\"Test bed names can't be empty.\")\n name = str(name)\n for char in name:\n if char not in utils.valid_filename_chars:\n raise MoblyConfigError(\n 'Char \"%s\" is not allowed in test bed names.' % char)\n\n\ndef _validate_testbed_configs(testbed_configs):\n \"\"\"Validates the testbed configurations.\n\n Args:\n testbed_configs: A list of testbed configuration dicts.\n\n Raises:\n MoblyConfigError: Some parts of the configuration is invalid.\n \"\"\"\n seen_names = set()\n # Cross checks testbed configs for resource conflicts.\n for config in testbed_configs:\n # Check for conflicts between multiple concurrent testbed configs.\n # No need to call it if there's only one testbed config.\n name = config[keys.Config.key_testbed_name.value]\n _validate_testbed_name(name)\n # Test bed names should be unique.\n if name in seen_names:\n raise MoblyConfigError('Duplicate testbed name %s found.' % name)\n seen_names.add(name)\n\n\ndef load_test_config_file(test_config_path, tb_filters=None):\n \"\"\"Processes the test configuration file provied by user.\n\n Loads the configuration file into a dict, unpacks each testbed\n config into its own dict, and validate the configuration in the\n process.\n\n Args:\n test_config_path: Path to the test configuration file.\n tb_filters: A subset of test bed names to be pulled from the config\n file. If None, then all test beds will be selected.\n\n Returns:\n A list of test configuration dicts to be passed to\n test_runner.TestRunner.\n \"\"\"\n configs = _load_config_file(test_config_path)\n if tb_filters:\n tbs = []\n for tb in configs[keys.Config.key_testbed.value]:\n if tb[keys.Config.key_testbed_name.value] in tb_filters:\n tbs.append(tb)\n if len(tbs) != len(tb_filters):\n raise MoblyConfigError(\n 'Expect to find %d test bed configs, found %d. 
Check if'\n ' you have the correct test bed names.' % (len(tb_filters),\n len(tbs)))\n configs[keys.Config.key_testbed.value] = tbs\n mobly_params = configs.get(keys.Config.key_mobly_params.value, {})\n # Decide log path.\n log_path = mobly_params.get(keys.Config.key_log_path.value,\n _DEFAULT_LOG_PATH)\n if ENV_MOBLY_LOGPATH in os.environ:\n log_path = os.environ[ENV_MOBLY_LOGPATH]\n log_path = utils.abs_path(log_path)\n # Validate configs\n _validate_test_config(configs)\n _validate_testbed_configs(configs[keys.Config.key_testbed.value])\n # Transform config dict from user-facing key mapping to internal config object.\n test_configs = []\n for original_bed_config in configs[keys.Config.key_testbed.value]:\n test_run_config = TestRunConfig()\n test_run_config.test_bed_name = original_bed_config[\n keys.Config.key_testbed_name.value]\n test_run_config.log_path = log_path\n test_run_config.controller_configs = original_bed_config.get(\n keys.Config.key_testbed_controllers.value, {})\n test_run_config.user_params = original_bed_config.get(\n keys.Config.key_testbed_test_params.value, {})\n test_configs.append(test_run_config)\n return test_configs\n\n\ndef _load_config_file(path):\n \"\"\"Loads a test config file.\n\n The test config file has to be in YAML format.\n\n Args:\n path: A string that is the full path to the config file, including the\n file name.\n\n Returns:\n A dict that represents info in the config file.\n \"\"\"\n with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n conf = yaml.safe_load(f)\n return conf\n\n\nclass TestRunConfig(object):\n \"\"\"The data class that holds all the information needed for a test run.\n\n Attributes:\n log_path: string, specifies the root directory for all logs written by\n a test run.\n test_bed_name: string, the name of the test bed used by a test run.\n controller_configs: dict, configs used for instantiating controller\n objects.\n user_params: dict, all the parameters to be consumed by the test logic.\n summary_writer: records.TestSummaryWriter, used to write elements to\n the test result summary file.\n test_class_name_suffix: string, suffix to append to the class name for\n reporting. This is used for differentiating the same class\n executed with different parameters in a suite.\n \"\"\"\n\n def __init__(self):\n self.log_path = None\n self.test_bed_name = None\n self.controller_configs = None\n self.user_params = None\n self.summary_writer = None\n self.test_class_name_suffix = None\n\n def copy(self):\n \"\"\"Returns a deep copy of the current config.\n \"\"\"\n return copy.deepcopy(self)\n\n def __str__(self):\n content = dict(self.__dict__)\n content.pop('summary_writer')\n return pprint.pformat(content)\n", "path": "mobly/config_parser.py" } ]
diff --git a/mobly/config_parser.py b/mobly/config_parser.py index 2278ca6a..d26f34ba 100644 --- a/mobly/config_parser.py +++ b/mobly/config_parser.py @@ -152,7 +152,7 @@ def _load_config_file(path): A dict that represents info in the config file. """ with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f: - conf = yaml.load(f) + conf = yaml.safe_load(f) return conf diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py index 43951f4b..abe3a13e 100755 --- a/tests/mobly/base_test_test.py +++ b/tests/mobly/base_test_test.py @@ -148,8 +148,8 @@ def test_never(self): bt_cls = MockBaseTest(self.mock_test_cls_configs) expected_msg = ( - 'Test method name not_a_test_something does not follow ' - 'naming convention test_\*, abort.') + r'Test method name not_a_test_something does not follow ' + r'naming convention test_\*, abort.') with self.assertRaisesRegex(base_test.Error, expected_msg): bt_cls.run() @@ -186,8 +186,8 @@ def test_never(self): bt_cls = MockBaseTest(self.mock_test_cls_configs) expected_msg = ( - 'Test method name not_a_test_something does not follow ' - 'naming convention test_\*, abort.') + r'Test method name not_a_test_something does not follow ' + r'naming convention test_\*, abort.') with self.assertRaisesRegex(base_test.Error, expected_msg): bt_cls.run(test_names=["not_a_test_something"]) @@ -1994,7 +1994,7 @@ def test_something(self): self.assertEqual(actual_record.test_name, "test_something") hit = False with io.open(self.summary_file, 'r', encoding='utf-8') as f: - for c in yaml.load_all(f): + for c in yaml.safe_load_all(f): if c['Type'] != records.TestSummaryEntryType.USER_DATA.value: continue hit = True diff --git a/tests/mobly/output_test.py b/tests/mobly/output_test.py index 8a879028..6caf9b81 100755 --- a/tests/mobly/output_test.py +++ b/tests/mobly/output_test.py @@ -232,7 +232,7 @@ def test_basic_output(self): info_log_path) = self.assert_output_logs_exist(output_dir) summary_entries = [] with io.open(summary_file_path, 'r', encoding='utf-8') as f: - for entry in yaml.load_all(f): + for entry in yaml.safe_load_all(f): self.assertTrue(entry['Type']) summary_entries.append(entry) self.assert_log_contents(debug_log_path, whitelist=['DEBUG', 'INFO']) @@ -255,7 +255,7 @@ def test_teardown_class_output(self): with io.open(summary_file_path, 'r', encoding='utf-8') as f: raw_content = f.read() f.seek(0) - for entry in yaml.load_all(f): + for entry in yaml.safe_load_all(f): if (entry['Type'] == 'Record' and entry[records.TestResultEnums.RECORD_NAME] == 'teardown_class'): diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py index ba25a690..a374347d 100755 --- a/tests/mobly/records_test.py +++ b/tests/mobly/records_test.py @@ -361,7 +361,7 @@ def test_summary_write_dump(self): writer = records.TestSummaryWriter(dump_path) writer.dump(record1.to_dict(), records.TestSummaryEntryType.RECORD) with io.open(dump_path, 'r', encoding='utf-8') as f: - content = yaml.load(f) + content = yaml.safe_load(f) self.assertEqual(content['Type'], records.TestSummaryEntryType.RECORD.value) self.assertEqual(content[records.TestResultEnums.RECORD_DETAILS], @@ -380,7 +380,7 @@ def test_summary_write_dump_with_unicode(self): writer = records.TestSummaryWriter(dump_path) writer.dump(record1.to_dict(), records.TestSummaryEntryType.RECORD) with io.open(dump_path, 'r', encoding='utf-8') as f: - content = yaml.load(f) + content = yaml.safe_load(f) self.assertEqual(content['Type'], records.TestSummaryEntryType.RECORD.value) 
self.assertEqual(content[records.TestResultEnums.RECORD_DETAILS], @@ -398,7 +398,7 @@ def test_summary_user_data(self): writer.dump(data, records.TestSummaryEntryType.USER_DATA) with io.open(dump_path, 'r', encoding='utf-8') as f: contents = [] - for c in yaml.load_all(f): + for c in yaml.safe_load_all(f): contents.append(c) for content in contents: self.assertEqual(content['Type'], diff --git a/tests/mobly/test_runner_test.py b/tests/mobly/test_runner_test.py index 0d282ba4..26af2eee 100755 --- a/tests/mobly/test_runner_test.py +++ b/tests/mobly/test_runner_test.py @@ -132,7 +132,7 @@ def test_summary_file_entries(self): summary_path = os.path.join(logging.log_path, records.OUTPUT_FILE_SUMMARY) with io.open(summary_path, 'r', encoding='utf-8') as f: - summary_entries = list(yaml.load_all(f)) + summary_entries = list(yaml.safe_load_all(f)) self.assertEqual(len(summary_entries), 4) # Verify the first entry is the list of test names. self.assertEqual(summary_entries[0]['Type'],
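Besides swapping in `yaml.safe_load`/`yaml.safe_load_all`, the diff also converts the regex literals in `base_test_test.py` to raw strings. That part is unrelated to YAML: in a normal string literal `'\*'` is an unrecognized escape sequence (a deprecation warning on newer Pythons), while a raw string hands the backslash through to `re` unchanged, where `\*` means a literal asterisk. A tiny check:

```python
import re

# Raw string: the backslash reaches re, where \* matches a literal '*'.
pattern = r'naming convention test_\*, abort'
assert re.search(pattern, 'Test method name foo does not follow naming convention test_*, abort.')
```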
kivy__python-for-android-1351
Python2 Build fails with make: *** [Makefile:426: sharedmods] Error 139 # Python version: 3.6 # OS: Arch Linux # python-for-android version: 0.6.0 The command I use to build is: ` p4a apk --private ~/Projects/Python/Mobile_Apps/BeerApp/ --package=org.drink.recommendations --name "Drink Recommendations" --version 0.2 --bootstrap=sdl2 --requirements=python2,kivy --ndk_version r9c ` The error is: ` make: *** [Makefile:426: sharedmods] Error 139 ` The build logs are in the following file. [p4a_errors.txt](https://github.com/kivy/python-for-android/files/2091833/p4a_errors.txt) Initially I thought that this was a buildozer issue, as I attempted it that way first. So, I opened an issue on their GitHub page and multiple users pointed out that they too were experiencing this issue. I've tried with both python3 and python2; the outcome is the same. There is absolutely no unicode in any of my source files, and I've also attempted the build with pygame instead of sdl2 (for python 2). There are also multiple similar SO threads open about this. Does anyone have any suggestions or ideas as to why this is happening and how to go about fixing it? It's also worth noting that if I use the kivy buildozer VM, I can use buildozer to carry out a successful build. Just not on any other machine using either buildozer or p4a, using the same source and build commands. The buildozer issue is here: https://github.com/kivy/buildozer/issues/673 The output from the dump file is: ` Reading symbols from /home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python...done. [New LWP 28854] [Thread debugging using libthread_db enabled] Using host libthread_db library "/usr/lib/libthread_db.so.1". Core was generated by ./python -E ./setup.py -q build. Program terminated with signal SIGSEGV, Segmentation fault. #0 0x000055731803eb2a in PyInstance_NewRaw (klass=klass@entry=0x7f7cbf1d1c18, dict=0x557319325210, dict@entry=0x0) at Objects/classobject.c:534 534 inst->in_dict = dict; File "/home/suroh/.local/share/python-for-android/build/other_builds/hostpython2/desktop/hostpython2/python-gdb.py", line 55 Py_TPFLAGS_HEAPTYPE = (1L << 9) ^ SyntaxError: invalid syntax `
[ { "content": "\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning\nfrom os.path import join, exists\nimport os\nimport sh\n\n\nclass Hostpython2Recipe(Recipe):\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'\n name = 'hostpython2'\n\n conflicts = ['hostpython3']\n\n def get_build_container_dir(self, arch=None):\n choices = self.check_recipe_choices()\n dir_name = '-'.join([self.name] + choices)\n return join(self.ctx.build_dir, 'other_builds', dir_name, 'desktop')\n\n def get_build_dir(self, arch=None):\n return join(self.get_build_container_dir(), self.name)\n\n def prebuild_arch(self, arch):\n # Override hostpython Setup?\n shprint(sh.cp, join(self.get_recipe_dir(), 'Setup'),\n join(self.get_build_dir(), 'Modules', 'Setup'))\n\n def build_arch(self, arch):\n with current_directory(self.get_build_dir()):\n\n if exists('hostpython'):\n info('hostpython already exists, skipping build')\n self.ctx.hostpython = join(self.get_build_dir(),\n 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(),\n 'hostpgen')\n return\n\n if 'LIBS' in os.environ:\n os.environ.pop('LIBS')\n configure = sh.Command('./configure')\n\n shprint(configure)\n shprint(sh.make, '-j5')\n\n shprint(sh.mv, join('Parser', 'pgen'), 'hostpgen')\n\n if exists('python.exe'):\n shprint(sh.mv, 'python.exe', 'hostpython')\n elif exists('python'):\n shprint(sh.mv, 'python', 'hostpython')\n else:\n warning('Unable to find the python executable after '\n 'hostpython build! Exiting.')\n exit(1)\n\n self.ctx.hostpython = join(self.get_build_dir(), 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(), 'hostpgen')\n\n\nrecipe = Hostpython2Recipe()\n", "path": "pythonforandroid/recipes/hostpython2/__init__.py" } ]
[ { "content": "\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory, info, warning\nfrom os.path import join, exists\nfrom os import chdir\nimport os\nimport sh\n\n\nclass Hostpython2Recipe(Recipe):\n version = '2.7.2'\n url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2'\n name = 'hostpython2'\n patches = ['fix-segfault-pygchead.patch']\n\n conflicts = ['hostpython3']\n\n def get_build_container_dir(self, arch=None):\n choices = self.check_recipe_choices()\n dir_name = '-'.join([self.name] + choices)\n return join(self.ctx.build_dir, 'other_builds', dir_name, 'desktop')\n\n def get_build_dir(self, arch=None):\n return join(self.get_build_container_dir(), self.name)\n\n def prebuild_arch(self, arch):\n # Override hostpython Setup?\n shprint(sh.cp, join(self.get_recipe_dir(), 'Setup'),\n join(self.get_build_dir(), 'Modules', 'Setup'))\n\n def build_arch(self, arch):\n with current_directory(self.get_build_dir()):\n\n if exists('hostpython'):\n info('hostpython already exists, skipping build')\n self.ctx.hostpython = join(self.get_build_dir(),\n 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(),\n 'hostpgen')\n return\n \n if 'LIBS' in os.environ:\n os.environ.pop('LIBS')\n configure = sh.Command('./configure')\n\n shprint(configure)\n shprint(sh.make, '-j5')\n\n shprint(sh.mv, join('Parser', 'pgen'), 'hostpgen')\n\n if exists('python.exe'):\n shprint(sh.mv, 'python.exe', 'hostpython')\n elif exists('python'):\n shprint(sh.mv, 'python', 'hostpython')\n else:\n warning('Unable to find the python executable after '\n 'hostpython build! Exiting.')\n exit(1)\n\n self.ctx.hostpython = join(self.get_build_dir(), 'hostpython')\n self.ctx.hostpgen = join(self.get_build_dir(), 'hostpgen')\n\n\nrecipe = Hostpython2Recipe()\n", "path": "pythonforandroid/recipes/hostpython2/__init__.py" } ]
diff --git a/pythonforandroid/recipes/hostpython2/__init__.py b/pythonforandroid/recipes/hostpython2/__init__.py index 5a5b362f59..dc1ccb4cbc 100644 --- a/pythonforandroid/recipes/hostpython2/__init__.py +++ b/pythonforandroid/recipes/hostpython2/__init__.py @@ -10,6 +10,7 @@ class Hostpython2Recipe(Recipe): version = '2.7.2' url = 'https://python.org/ftp/python/{version}/Python-{version}.tar.bz2' name = 'hostpython2' + patches = ['fix-segfault-pygchead.patch'] conflicts = ['hostpython3'] diff --git a/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch b/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch new file mode 100644 index 0000000000..25d4599cb0 --- /dev/null +++ b/pythonforandroid/recipes/hostpython2/fix-segfault-pygchead.patch @@ -0,0 +1,12 @@ +diff -Naur Python-2.7.2.orig/Include/objimpl.h Python-2.7.2/Include/objimpl.h +--- Python-2.7.2.orig/Include/objimpl.h 2011-06-11 17:46:23.000000000 +0200 ++++ Python-2.7.2/Include/objimpl.h 2018-09-04 17:33:09.254654565 +0200 +@@ -255,7 +255,7 @@ + union _gc_head *gc_prev; + Py_ssize_t gc_refs; + } gc; +- long double dummy; /* force worst-case alignment */ ++ double dummy; /* force worst-case alignment */ + } PyGC_Head; + + extern PyGC_Head *_PyGC_generation0;
DistrictDataLabs__yellowbrick-1162
pytest-runner is deprecated pytest-runner is deprecated: https://github.com/pytest-dev/pytest-runner/#deprecation-notice If I find time, then I can make a PR, but I thought I'd let you know in the meantime.
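Following the deprecation notice, the usual migration is to stop routing tests through `setup.py test`: remove `setup_requires=['pytest-runner']` and `tests_require=['pytest']` from `setup()` and invoke pytest directly (from the Makefile or CI). A rough sketch of the trimmed configuration; the `testing` extra is only an illustration, not something the project necessarily defines:

```python
# setup.py (fragment)
from setuptools import setup

setup(
    name="yellowbrick",
    # ... metadata and install_requires unchanged ...
    python_requires=">=3.4, <4",
    extras_require={"testing": ["pytest"]},  # hypothetical extra: `pip install -e .[testing]`
    # no setup_requires / tests_require: tests run with a plain `pytest` invocation
)
```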
[ { "content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] [email protected] $\n\n\"\"\"\nSetup script for installing yellowbrick.\nSee http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport codecs\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n##########################################################################\n## Package Information\n##########################################################################\n\n## Basic information\n## Basic information\nNAME = \"yellowbrick\"\nDESCRIPTION = \"A suite of visual analysis and diagnostic tools for machine learning.\"\nAUTHOR = \"The scikit-yb developers\"\nEMAIL = \"[email protected]\"\nMAINTAINER = \"The scikit-yb developers\"\nLICENSE = \"Apache 2\"\nREPOSITORY = \"https://github.com/DistrictDataLabs/yellowbrick\"\nPACKAGE = \"yellowbrick\"\nURL = \"http://scikit-yb.org/\"\n\n## Define the keywords\nKEYWORDS = (\n \"visualization\",\n \"machine learning\",\n \"scikit-learn\",\n \"matplotlib\",\n \"data science\",\n)\n\n## Define the classifiers\n## See https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = (\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n)\n\n## Important Paths\nPROJECT = os.path.abspath(os.path.dirname(__file__))\nREQUIRE_PATH = \"requirements.txt\"\nVERSION_PATH = os.path.join(PACKAGE, \"version.py\")\nPKG_DESCRIBE = \"DESCRIPTION.md\"\n\n## Directories to ignore in find_packages\nEXCLUDES = (\n \"tests\", \"tests.*\",\n \"bin\",\n \"docs\", \"docs.*\",\n \"fixtures\",\n \"register\",\n \"notebooks\", \"notebooks.*\",\n \"examples\", \"examples.*\",\n \"binder\", \"binder.*\",\n \"paper\",\n)\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n\ndef read(*parts):\n \"\"\"\n Assume UTF-8 encoding and return the contents of the file located at the\n absolute path from the REPOSITORY joined with *parts.\n \"\"\"\n with codecs.open(os.path.join(PROJECT, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\ndef get_version(path=VERSION_PATH):\n \"\"\"\n Reads the python file defined in the VERSION_PATH to find the get_version\n function, and executes it to ensure that it is loaded correctly. 
Separating\n the version in this way ensures no additional code is executed.\n \"\"\"\n namespace = {}\n exec(read(path), namespace)\n return namespace[\"get_version\"](short=True)\n\n\ndef get_requires(path=REQUIRE_PATH):\n \"\"\"\n Yields a generator of requirements as defined by the REQUIRE_PATH which\n should point to a requirements.txt output by `pip freeze`.\n \"\"\"\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n yield line\n\n\ndef get_description_type(path=PKG_DESCRIBE):\n \"\"\"\n Returns the long_description_content_type based on the extension of the\n package describe path (e.g. .txt, .rst, or .md).\n \"\"\"\n _, ext = os.path.splitext(path)\n return {\".rst\": \"text/x-rst\", \".txt\": \"text/plain\", \".md\": \"text/markdown\"}[ext]\n\n\n##########################################################################\n## Define the configuration\n##########################################################################\n\nconfig = {\n \"name\": NAME,\n \"version\": get_version(),\n \"description\": DESCRIPTION,\n \"long_description\": read(PKG_DESCRIBE),\n \"long_description_content_type\": get_description_type(PKG_DESCRIBE),\n \"classifiers\": CLASSIFIERS,\n \"keywords\": KEYWORDS,\n \"license\": LICENSE,\n \"author\": AUTHOR,\n \"author_email\": EMAIL,\n \"url\": URL,\n \"maintainer\": MAINTAINER,\n \"maintainer_email\": EMAIL,\n \"project_urls\": {\n \"Documentation\": URL,\n \"Download\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"Source\": REPOSITORY,\n \"Tracker\": \"{}/issues\".format(REPOSITORY),\n },\n \"download_url\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"packages\": find_packages(where=PROJECT, exclude=EXCLUDES),\n \"package_data\": {\"yellowbrick\": [\"datasets/manifest.json\"]},\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n \"python_requires\": \">=3.4, <4\",\n \"setup_requires\": [\"pytest-runner\"],\n \"tests_require\": [\"pytest\"],\n}\n\n\n##########################################################################\n## Run setup script\n##########################################################################\n\nif __name__ == \"__main__\":\n setup(**config)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] [email protected] $\n\n\"\"\"\nSetup script for installing yellowbrick.\nSee http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport codecs\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n##########################################################################\n## Package Information\n##########################################################################\n\n## Basic information\n## Basic information\nNAME = \"yellowbrick\"\nDESCRIPTION = \"A suite of visual analysis and diagnostic tools for machine learning.\"\nAUTHOR = \"The scikit-yb developers\"\nEMAIL = \"[email protected]\"\nMAINTAINER = \"The scikit-yb developers\"\nLICENSE = \"Apache 2\"\nREPOSITORY = \"https://github.com/DistrictDataLabs/yellowbrick\"\nPACKAGE = \"yellowbrick\"\nURL = \"http://scikit-yb.org/\"\n\n## Define the keywords\nKEYWORDS = (\n \"visualization\",\n \"machine learning\",\n \"scikit-learn\",\n \"matplotlib\",\n \"data science\",\n)\n\n## Define the classifiers\n## See https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = (\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n)\n\n## Important Paths\nPROJECT = os.path.abspath(os.path.dirname(__file__))\nREQUIRE_PATH = \"requirements.txt\"\nVERSION_PATH = os.path.join(PACKAGE, \"version.py\")\nPKG_DESCRIBE = \"DESCRIPTION.md\"\n\n## Directories to ignore in find_packages\nEXCLUDES = (\n \"tests\", \"tests.*\",\n \"bin\",\n \"docs\", \"docs.*\",\n \"fixtures\",\n \"register\",\n \"notebooks\", \"notebooks.*\",\n \"examples\", \"examples.*\",\n \"binder\", \"binder.*\",\n \"paper\",\n)\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n\ndef read(*parts):\n \"\"\"\n Assume UTF-8 encoding and return the contents of the file located at the\n absolute path from the REPOSITORY joined with *parts.\n \"\"\"\n with codecs.open(os.path.join(PROJECT, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\ndef get_version(path=VERSION_PATH):\n \"\"\"\n Reads the python file defined in the VERSION_PATH to find the get_version\n function, and executes it to ensure that it is loaded correctly. 
Separating\n the version in this way ensures no additional code is executed.\n \"\"\"\n namespace = {}\n exec(read(path), namespace)\n return namespace[\"get_version\"](short=True)\n\n\ndef get_requires(path=REQUIRE_PATH):\n \"\"\"\n Yields a generator of requirements as defined by the REQUIRE_PATH which\n should point to a requirements.txt output by `pip freeze`.\n \"\"\"\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n yield line\n\n\ndef get_description_type(path=PKG_DESCRIBE):\n \"\"\"\n Returns the long_description_content_type based on the extension of the\n package describe path (e.g. .txt, .rst, or .md).\n \"\"\"\n _, ext = os.path.splitext(path)\n return {\".rst\": \"text/x-rst\", \".txt\": \"text/plain\", \".md\": \"text/markdown\"}[ext]\n\n\n##########################################################################\n## Define the configuration\n##########################################################################\n\nconfig = {\n \"name\": NAME,\n \"version\": get_version(),\n \"description\": DESCRIPTION,\n \"long_description\": read(PKG_DESCRIBE),\n \"long_description_content_type\": get_description_type(PKG_DESCRIBE),\n \"classifiers\": CLASSIFIERS,\n \"keywords\": KEYWORDS,\n \"license\": LICENSE,\n \"author\": AUTHOR,\n \"author_email\": EMAIL,\n \"url\": URL,\n \"maintainer\": MAINTAINER,\n \"maintainer_email\": EMAIL,\n \"project_urls\": {\n \"Documentation\": URL,\n \"Download\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"Source\": REPOSITORY,\n \"Tracker\": \"{}/issues\".format(REPOSITORY),\n },\n \"download_url\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"packages\": find_packages(where=PROJECT, exclude=EXCLUDES),\n \"package_data\": {\"yellowbrick\": [\"datasets/manifest.json\"]},\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n \"python_requires\": \">=3.4, <4\"\n}\n\n\n##########################################################################\n## Run setup script\n##########################################################################\n\nif __name__ == \"__main__\":\n setup(**config)\n", "path": "setup.py" } ]
diff --git a/Makefile b/Makefile index d8ba922f9..4cc479eff 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ clean: # Targets for testing test: - python setup.py test + pytest # Publish to gh-pages publish: @@ -48,3 +48,4 @@ install: deploy: python setup.py register twine upload dist/* + diff --git a/setup.py b/setup.py index d47d5f66e..6ed128329 100755 --- a/setup.py +++ b/setup.py @@ -163,9 +163,7 @@ def get_description_type(path=PKG_DESCRIBE): "zip_safe": False, "entry_points": {"console_scripts": []}, "install_requires": list(get_requires()), - "python_requires": ">=3.4, <4", - "setup_requires": ["pytest-runner"], - "tests_require": ["pytest"], + "python_requires": ">=3.4, <4" }
agconti__cookiecutter-django-rest-250
Update Managers to Admins in `common.py`
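For context on why the rename matters: Django's `AdminEmailHandler` (wired up as the `mail_admins` handler in the `LOGGING` config of the settings file shown below) emails the addresses listed in `ADMINS`, while `MANAGERS` is only consulted for broken-link notifications, so with only `MANAGERS` set the error reports go nowhere. A minimal sketch of the intended setting, with a placeholder address standing in for the cookiecutter email variable:

```python
# config/common.py (fragment)
ADMINS = (
    ("Author", "author@example.com"),  # placeholder for the {{cookiecutter.email}} value
)
# MANAGERS can be left at its default (empty); it only feeds broken-link emails.
```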
[ { "content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n\n # Third party apps\n 'rest_framework', # utilities for rest apis\n 'rest_framework.authtoken', # token authentication\n 'django_rq', # asynchronous queuing\n 'versatileimagefield', # image manipulation\n\n # Your apps\n 'authentication',\n 'users'\n\n )\n\n # https://docs.djangoproject.com/en/1.8/topics/http/middleware/\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware'\n )\n\n ROOT_URLCONF = 'urls'\n\n SECRET_KEY = 'Not a secret'\n WSGI_APPLICATION = 'wsgi.application'\n\n # Email\n EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')\n\n MANAGERS = (\n ('Author', '{{cookiecutter.email}}'),\n )\n\n # Postgres\n DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')\n\n # General\n APPEND_SLASH = values.BooleanValue(False)\n TIME_ZONE = 'UTC'\n LANGUAGE_CODE = 'en-us'\n # If you set this to False, Django will make some optimizations so as not\n # to load the internationalization machinery.\n USE_I18N = False\n USE_L10N = True\n USE_TZ = True\n LOGIN_REDIRECT_URL = '/'\n\n # Static Files\n STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')\n STATICFILES_DIRS = [join(os.path.dirname(BASE_DIR), 'static'), ]\n STATIC_URL = '/static/'\n STATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n )\n\n # Media files\n MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')\n MEDIA_URL = '/media/'\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [STATICFILES_DIRS],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages'\n ],\n 'loaders':[\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n },\n },\n ]\n\n # Set DEBUG to False as a default for safety\n # https://docs.djangoproject.com/en/dev/ref/settings/#debug\n DEBUG = values.BooleanValue(False)\n for config in TEMPLATES:\n config['OPTIONS']['debug'] = DEBUG\n\n # Logging\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'rq_console': {\n 'format': '%(asctime)s %(message)s',\n 'datefmt': '%H:%M:%S',\n },\n },\n 'filters': {\n 'require_debug_true': {\n '()': 
'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'rq_console': {\n 'level': 'DEBUG',\n 'class': 'rq.utils.ColorizingStreamHandler',\n 'formatter': 'rq_console',\n 'exclude': ['%(asctime)s'],\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'rq.worker': {\n 'handlers': ['rq_console'],\n 'level': 'DEBUG'\n }\n }\n }\n\n # Custom user app\n AUTH_USER_MODEL = 'users.User'\n\n # Django Rest Framework\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 10)),\n 'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n )\n }\n\n # Versatile Image Field\n VERSATILEIMAGEFIELD_SETTINGS = {\n # The amount of time, in seconds, that references to created images\n # should be stored in the cache. Defaults to `2592000` (30 days)\n 'cache_length': 2592000,\n 'cache_name': 'versatileimagefield_cache',\n 'jpeg_resize_quality': 70,\n 'sized_directory_name': '__sized__',\n 'filtered_directory_name': '__filtered__',\n 'placeholder_directory_name': '__placeholder__',\n 'create_images_on_demand': False\n }\n\n # django-rq\n # Adds dashboard link for queues in /admin, This will override the default\n # admin template so it may interfere with other apps that modify the\n # default admin template. If you're using such an app, simply remove this.\n RQ_SHOW_ADMIN_LINK = True\n", "path": "{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py" } ]
[ { "content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n\n # Third party apps\n 'rest_framework', # utilities for rest apis\n 'rest_framework.authtoken', # token authentication\n 'django_rq', # asynchronous queuing\n 'versatileimagefield', # image manipulation\n\n # Your apps\n 'authentication',\n 'users'\n\n )\n\n # https://docs.djangoproject.com/en/1.8/topics/http/middleware/\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware'\n )\n\n ROOT_URLCONF = 'urls'\n\n SECRET_KEY = 'Not a secret'\n WSGI_APPLICATION = 'wsgi.application'\n\n # Email\n EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')\n\n ADMINS = (\n ('Author', '{{cookiecutter.email}}'),\n )\n\n # Postgres\n DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')\n\n # General\n APPEND_SLASH = values.BooleanValue(False)\n TIME_ZONE = 'UTC'\n LANGUAGE_CODE = 'en-us'\n # If you set this to False, Django will make some optimizations so as not\n # to load the internationalization machinery.\n USE_I18N = False\n USE_L10N = True\n USE_TZ = True\n LOGIN_REDIRECT_URL = '/'\n\n # Static Files\n STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')\n STATICFILES_DIRS = [join(os.path.dirname(BASE_DIR), 'static'), ]\n STATIC_URL = '/static/'\n STATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n )\n\n # Media files\n MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')\n MEDIA_URL = '/media/'\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [STATICFILES_DIRS],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages'\n ],\n 'loaders':[\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n },\n },\n ]\n\n # Set DEBUG to False as a default for safety\n # https://docs.djangoproject.com/en/dev/ref/settings/#debug\n DEBUG = values.BooleanValue(False)\n for config in TEMPLATES:\n config['OPTIONS']['debug'] = DEBUG\n\n # Logging\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'rq_console': {\n 'format': '%(asctime)s %(message)s',\n 'datefmt': '%H:%M:%S',\n },\n },\n 'filters': {\n 'require_debug_true': {\n '()': 
'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'rq_console': {\n 'level': 'DEBUG',\n 'class': 'rq.utils.ColorizingStreamHandler',\n 'formatter': 'rq_console',\n 'exclude': ['%(asctime)s'],\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'rq.worker': {\n 'handlers': ['rq_console'],\n 'level': 'DEBUG'\n }\n }\n }\n\n # Custom user app\n AUTH_USER_MODEL = 'users.User'\n\n # Django Rest Framework\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 10)),\n 'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n )\n }\n\n # Versatile Image Field\n VERSATILEIMAGEFIELD_SETTINGS = {\n # The amount of time, in seconds, that references to created images\n # should be stored in the cache. Defaults to `2592000` (30 days)\n 'cache_length': 2592000,\n 'cache_name': 'versatileimagefield_cache',\n 'jpeg_resize_quality': 70,\n 'sized_directory_name': '__sized__',\n 'filtered_directory_name': '__filtered__',\n 'placeholder_directory_name': '__placeholder__',\n 'create_images_on_demand': False\n }\n\n # django-rq\n # Adds dashboard link for queues in /admin, This will override the default\n # admin template so it may interfere with other apps that modify the\n # default admin template. If you're using such an app, simply remove this.\n RQ_SHOW_ADMIN_LINK = True\n", "path": "{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py" } ]
diff --git a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py index 9df3857eb..3b7996df6 100755 --- a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py +++ b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py @@ -48,7 +48,7 @@ class Common(Configuration): # Email EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend') - MANAGERS = ( + ADMINS = ( ('Author', '{{cookiecutter.email}}'), )
RedHatInsights__insights-core-2890
first_file in insights_archive isn't bound to the right context The first_file helper in [insights_archive.py](https://github.com/RedHatInsights/insights-core/blob/master/insights/specs/insights_archive.py#L7) isn't bound to the `HostArchiveContext`, so it will try to fire for any context that has a filesystem root.
[ { "content": "from insights.core.spec_factory import glob_file, simple_file, head, first_file\nfrom functools import partial\nfrom insights.core.context import HostArchiveContext\nfrom insights.specs import Specs\n\nsimple_file = partial(simple_file, context=HostArchiveContext)\nglob_file = partial(glob_file, context=HostArchiveContext)\n\n\nclass InsightsArchiveSpecs(Specs):\n\n abrt_status_bare = simple_file(\"insights_commands/abrt_status_--bare_True\")\n all_installed_rpms = glob_file(\"insights_commands/rpm_-qa*\")\n alternatives_display_python = simple_file(\"insights_commands/alternatives_--display_python\")\n auditctl_status = simple_file(\"insights_commands/auditctl_-s\")\n aws_instance_id_doc = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc\")\n aws_instance_id_pkcs7 = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7\")\n awx_manage_check_license = simple_file(\"insights_commands/awx-manage_check_license\")\n azure_instance_type = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type\")\n bios_uuid = simple_file(\"insights_commands/dmidecode_-s_system-uuid\")\n blkid = simple_file(\"insights_commands/blkid_-c_.dev.null\")\n brctl_show = simple_file(\"insights_commands/brctl_show\")\n ceph_df_detail = first_file([\"insights_commands/ceph_df_detail_-f_json-pretty\", \"insights_commands/ceph_df_detail_-f_json\"])\n ceph_health_detail = first_file([\"insights_commands/ceph_health_detail_-f_json-pretty\", \"insights_commands/ceph_health_detail_-f_json\"])\n ceph_insights = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_ceph_insights\")\n ceph_osd_dump = first_file([\"insights_commands/ceph_osd_dump_-f_json-pretty\", \"insights_commands/ceph_osd_dump_-f_json\"])\n ceph_osd_tree = first_file([\"insights_commands/ceph_osd_tree_-f_json-pretty\", \"insights_commands/ceph_osd_tree_-f_json\"])\n ceph_s = first_file([\"insights_commands/ceph_-s_-f_json-pretty\", \"insights_commands/ceph_-s_-f_json\"])\n ceph_v = simple_file(\"insights_commands/ceph_-v\")\n certificates_enddate = first_file([\"insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName\", \"insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName\"])\n chkconfig = simple_file(\"insights_commands/chkconfig_--list\")\n chronyc_sources = simple_file(\"insights_commands/chronyc_sources\")\n corosync_cmapctl = glob_file(\"insights_commands/corosync-cmapctl*\")\n cpupower_frequency_info = simple_file(\"insights_commands/cpupower_-c_all_frequency-info\")\n date = simple_file(\"insights_commands/date\")\n date_utc = simple_file(\"insights_commands/date_--utc\")\n df__al = first_file([\"insights_commands/df_-al_-x_autofs\", \"insights_commands/df_-al\"])\n df__alP = first_file([\"insights_commands/df_-alP_-x_autofs\", \"insights_commands/df_-alP\"])\n df__li = first_file([\"insights_commands/df_-li_-x_autofs\", \"insights_commands/df_-li\"])\n dig_dnssec = simple_file(\"insights_commands/dig_dnssec_._SOA\")\n dig_edns = simple_file(\"insights_commands/dig_edns_0_._SOA\")\n dig_noedns = simple_file(\"insights_commands/dig_noedns_._SOA\")\n display_name = simple_file(\"display_name\")\n dmesg = simple_file(\"insights_commands/dmesg\")\n dmidecode = simple_file(\"insights_commands/dmidecode\")\n 
dmsetup_info = simple_file(\"insights_commands/dmsetup_info_-C\")\n docker_info = simple_file(\"insights_commands/docker_info\")\n docker_list_containers = simple_file(\"insights_commands/docker_ps_--all_--no-trunc\")\n docker_list_images = simple_file(\"insights_commands/docker_images_--all_--no-trunc_--digests\")\n dotnet_version = simple_file(\"insights_commands/dotnet_--version\")\n doveconf = simple_file(\"insights_commands/doveconf\")\n du_dirs = glob_file(\"insights_commands/du_-s_-k_*\")\n engine_db_query_vdsm_version = simple_file(\"insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json\")\n ethtool = glob_file(\"insights_commands/ethtool_*\", ignore=\"ethtool_-.*\")\n ethtool_S = glob_file(\"insights_commands/ethtool_-S_*\")\n ethtool_T = glob_file(\"insights_commands/ethtool_-T_*\")\n ethtool_c = glob_file(\"insights_commands/ethtool_-c_*\")\n ethtool_g = glob_file(\"insights_commands/ethtool_-g_*\")\n ethtool_i = glob_file(\"insights_commands/ethtool_-i_*\")\n ethtool_k = glob_file(\"insights_commands/ethtool_-k_*\")\n facter = simple_file(\"insights_commands/facter\")\n fc_match = simple_file(\"insights_commands/fc-match_-sv_sans_regular_roman_family_fontformat\")\n fcoeadm_i = simple_file(\"insights_commands/fcoeadm_-i\")\n findmnt_lo_propagation = simple_file(\"insights_commands/findmnt_-lo_PROPAGATION\")\n firewall_cmd_list_all_zones = simple_file(\"insights_commands/firewall-cmd_--list-all-zones\")\n getconf_page_size = simple_file(\"insights_commands/getconf_PAGE_SIZE\")\n getenforce = simple_file(\"insights_commands/getenforce\")\n getsebool = simple_file(\"insights_commands/getsebool_-a\")\n grub1_config_perms = simple_file(\"insights_commands/ls_-l_.boot.grub.grub.conf\")\n grub_config_perms = simple_file(\"insights_commands/ls_-l_.boot.grub2.grub.cfg\")\n grubby_default_index = simple_file(\"insights_commands/grubby_--default-index\")\n grubby_default_kernel = simple_file(\"insights_commands/grubby_--default-kernel\")\n gluster_v_info = simple_file(\"insights_commands/gluster_volume_info\")\n hammer_task_list = simple_file(\"insights_commands/hammer_--config_.root..hammer.cli.modules.d.foreman.yml_--output_csv_task_list_--search_state_running_AND_label_Actions_Candlepin_ListenOnCandlepinEvents_OR_label_Actions_Katello_EventQueue_Monitor\")\n installed_rpms = head(all_installed_rpms)\n hostname = simple_file(\"insights_commands/hostname_-f\")\n hostname_default = simple_file(\"insights_commands/hostname\")\n hostname_short = simple_file(\"insights_commands/hostname_-s\")\n httpd_M = glob_file(\"insights_commands/*httpd*_-M\")\n httpd_on_nfs = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_httpd_on_nfs\")\n httpd_V = glob_file(\"insights_commands/*httpd*_-V\")\n initctl_lst = simple_file(\"insights_commands/initctl_--system_list\")\n ip6tables = simple_file(\"insights_commands/ip6tables-save\")\n ip_addr = simple_file(\"insights_commands/ip_addr\")\n ip_addresses = simple_file(\"insights_commands/hostname_-I\")\n ip_route_show_table_all = simple_file(\"insights_commands/ip_route_show_table_all\")\n ip_s_link = first_file([\"insights_commands/ip_-s_-d_link\", \"insights_commands/ip_-s_link\"])\n ipcs_m = simple_file(\"insights_commands/ipcs_-m\")\n ipcs_m_p = simple_file(\"insights_commands/ipcs_-m_-p\")\n ipcs_s = simple_file(\"insights_commands/ipcs_-s\")\n iptables = simple_file(\"insights_commands/iptables-save\")\n ipv4_neigh = 
simple_file(\"insights_commands/ip_-4_neighbor_show_nud_all\")\n ipv6_neigh = simple_file(\"insights_commands/ip_-6_neighbor_show_nud_all\")\n iscsiadm_m_session = simple_file(\"insights_commands/iscsiadm_-m_session\")\n keystone_crontab = simple_file(\"insights_commands/crontab_-l_-u_keystone\")\n kpatch_list = simple_file(\"insights_commands/kpatch_list\")\n localtime = simple_file(\"insights_commands/file_-L_.etc.localtime\")\n lpstat_p = simple_file(\"insights_commands/lpstat_-p\")\n ls_boot = simple_file(\"insights_commands/ls_-lanR_.boot\")\n ls_dev = simple_file(\"insights_commands/ls_-lanR_.dev\")\n ls_disk = simple_file(\"insights_commands/ls_-lanR_.dev.disk\")\n ls_edac_mc = simple_file(\"insights_commands/ls_-lan_.sys.devices.system.edac.mc\")\n ls_etc = simple_file(\"insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig\")\n ls_ipa_idoverride_memberof = simple_file(\"insights_commands/ls_-lan_.usr.share.ipa.ui.js.plugins.idoverride-memberof\")\n ls_lib_firmware = simple_file(\"insights_commands/ls_-lanR_.lib.firmware\")\n ls_ocp_cni_openshift_sdn = simple_file(\"insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn\")\n ls_origin_local_volumes_pods = simple_file(\"insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods\")\n ls_osroot = simple_file(\"insights_commands/ls_-lan\")\n ls_run_systemd_generator = simple_file(\"insights_commands/ls_-lan_.run.systemd.generator\")\n ls_R_var_lib_nova_instances = simple_file(\"insights_commands/ls_-laR_.var.lib.nova.instances\")\n ls_sys_firmware = simple_file(\"insights_commands/ls_-lanR_.sys.firmware\")\n ls_tmp = simple_file(\"insights_commands/ls_-la_.tmp\")\n ls_usr_bin = simple_file(\"insights_commands/ls_-lan_.usr.bin\")\n ls_usr_lib64 = simple_file(\"insights_commands/ls_-lan_.usr.lib64\")\n ls_var_lib_mongodb = simple_file(\"insights_commands/ls_-la_.var.lib.mongodb\")\n ls_var_lib_nova_instances = simple_file(\"insights_commands/ls_-laRZ_.var.lib.nova.instances\")\n ls_var_log = simple_file(\"insights_commands/ls_-la_.var.log_.var.log.audit\")\n ls_var_opt_mssql = simple_file(\"insights_commands/ls_-ld_.var.opt.mssql\")\n ls_var_opt_mssql_log = simple_file(\"insights_commands/ls_-la_.var.opt.mssql.log\")\n ls_var_spool_clientmq = simple_file(\"insights_commands/ls_-ln_.var.spool.clientmqueue\")\n ls_var_spool_postfix_maildrop = simple_file(\"insights_commands/ls_-ln_.var.spool.postfix.maildrop\")\n ls_var_tmp = simple_file(\"insights_commands/ls_-ln_.var.tmp\")\n ls_var_run = simple_file(\"insights_commands/ls_-lnL_.var.run\")\n ls_var_www = simple_file(\"insights_commands/ls_-la_.dev.null_.var.www\")\n lsblk = simple_file(\"insights_commands/lsblk\")\n lsblk_pairs = simple_file(\"insights_commands/lsblk_-P_-o_NAME_KNAME_MAJ_MIN_FSTYPE_MOUNTPOINT_LABEL_UUID_RA_RO_RM_MODEL_SIZE_STATE_OWNER_GROUP_MODE_ALIGNMENT_MIN-IO_OPT-IO_PHY-SEC_LOG-SEC_ROTA_SCHED_RQ-SIZE_TYPE_DISC-ALN_DISC-GRAN_DISC-MAX_DISC-ZERO\")\n lscpu = simple_file(\"insights_commands/lscpu\")\n lsmod = simple_file(\"insights_commands/lsmod\")\n lsof = simple_file(\"insights_commands/lsof\")\n lspci = simple_file(\"insights_commands/lspci_-k\")\n lssap = simple_file(\"insights_commands/usr.sap.hostctrl.exe.lssap\")\n lsscsi = simple_file(\"insights_commands/lsscsi\")\n lsvmbus = simple_file(\"insights_commands/lsvmbus_-vv\")\n lvmconfig = first_file([\n \"insights_commands/lvmconfig_--type_full\",\n 
\"insights_commands/lvm_dumpconfig_--type_full\"\n ])\n lvs_noheadings = first_file(\n [\n \"insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_lv_kernel_major_lv_kernel_minor_--config_global_locking_type_0\",\n \"insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0\"\n ]\n )\n max_uid = simple_file(\"insights_commands/awk_-F_if_3_max_max_3_END_print_max_.etc.passwd\")\n md5chk_files = glob_file(\"insights_commands/md5sum_*\")\n mount = simple_file(\"insights_commands/mount\")\n modinfo = glob_file(\"insights_commands/modinfo_*\")\n modinfo_i40e = simple_file(\"insights_commands/modinfo_i40e\")\n modinfo_igb = simple_file(\"insights_commands/modinfo_igb\")\n modinfo_ixgbe = simple_file(\"insights_commands/modinfo_ixgbe\")\n modinfo_veth = simple_file(\"insights_commands/modinfo_veth\")\n modinfo_vmxnet3 = simple_file(\"insights_commands/modinfo_vmxnet3\")\n mokutil_sbstate = simple_file(\"insights_commands/mokutil_--sb-state\")\n multicast_querier = simple_file(\"insights_commands/find_.sys.devices.virtual.net._-name_multicast_querier_-print_-exec_cat\")\n multipath_conf_initramfs = simple_file(\"insights_commands/lsinitrd_-f_.etc.multipath.conf\")\n multipath__v4__ll = simple_file(\"insights_commands/multipath_-v4_-ll\")\n mysqladmin_vars = simple_file(\"insights_commands/mysqladmin_variables\")\n named_checkconf_p = simple_file(\"insights_commands/named-checkconf_-p\")\n ndctl_list_Ni = simple_file(\"insights_commands/ndctl_list_-Ni\")\n netstat = simple_file(\"insights_commands/netstat_-neopa\")\n netstat_agn = simple_file(\"insights_commands/netstat_-agn\")\n netstat_i = simple_file(\"insights_commands/netstat_-i\")\n netstat_s = simple_file(\"insights_commands/netstat_-s\")\n nmcli_conn_show = simple_file(\"insights_commands/nmcli_conn_show\")\n nmcli_dev_show = simple_file(\"insights_commands/nmcli_dev_show\")\n nova_crontab = simple_file(\"insights_commands/crontab_-l_-u_nova\")\n nova_uid = simple_file(\"insights_commands/id_-u_nova\")\n ntpq_leap = simple_file(\"insights_commands/ntpq_-c_rv_0_leap\")\n ntptime = simple_file(\"insights_commands/ntptime\")\n numeric_user_group_name = simple_file(\"insights_commands/grep_-c_digit_.etc.passwd_.etc.group\")\n oc_get_clusterrole_with_config = simple_file(\"insights_commands/oc_get_clusterrole_--config_.etc.origin.master.admin.kubeconfig\")\n oc_get_clusterrolebinding_with_config = simple_file(\"insights_commands/oc_get_clusterrolebinding_--config_.etc.origin.master.admin.kubeconfig\")\n open_vm_tools_stat_raw_text_session = simple_file(\"insights_commands/vmware-toolbox-cmd_stat_raw_text_session\")\n openvswitch_other_config = simple_file(\"insights_commands/ovs-vsctl_-t_5_get_Open_vSwitch_._other_config\")\n ovs_vsctl_list_bridge = simple_file(\"insights_commands/ovs-vsctl_list_bridge\")\n ovs_vsctl_show = simple_file(\"insights_commands/ovs-vsctl_show\")\n package_provides_command = glob_file(\"insights_commands/echo_*java*\")\n passenger_status = simple_file(\"insights_commands/passenger-status\")\n pci_rport_target_disk_paths = simple_file(\"insights_commands/find_.sys.devices._-maxdepth_10_-mindepth_9_-name_stat_-type_f\")\n pcp_metrics = simple_file(\"insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5\")\n 
pcs_quorum_status = simple_file(\"insights_commands/pcs_quorum_status\")\n pcs_status = simple_file(\"insights_commands/pcs_status\")\n postconf_builtin = simple_file(\"insights_commands/postconf_-C_builtin\")\n postconf = simple_file(\"insights_commands/postconf\")\n ps_alxwww = simple_file(\"insights_commands/ps_alxwww\")\n ps_aux = simple_file(\"insights_commands/ps_aux\")\n ps_auxcww = simple_file(\"insights_commands/ps_auxcww\")\n ps_auxww = simple_file(\"insights_commands/ps_auxww\")\n ps_ef = simple_file(\"insights_commands/ps_-ef\")\n ps_eo = simple_file(\"insights_commands/ps_-eo_pid_ppid_comm\")\n puppet_ca_cert_expire_date = simple_file(\"insights_commands/openssl_x509_-in_.etc.puppetlabs.puppet.ssl.ca.ca_crt.pem_-enddate_-noout\")\n pvs_noheadings = simple_file(\"insights_commands/pvs_--nameprefixes_--noheadings_--separator_-a_-o_pv_all_vg_name_--config_global_locking_type_0\")\n qpid_stat_g = simple_file(\"insights_commands/qpid-stat_-g_--ssl-certificate_.etc.pki.katello.qpid_client_striped.crt_-b_amqps_..localhost_5671\")\n rabbitmq_report = simple_file(\"insights_commands/rabbitmqctl_report\")\n rabbitmq_users = simple_file(\"insights_commands/rabbitmqctl_list_users\")\n readlink_e_etc_mtab = simple_file(\"insights_commands/readlink_-e_.etc.mtab\")\n readlink_e_shift_cert_client = simple_file(\"insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-client-current.pem\")\n readlink_e_shift_cert_server = simple_file(\"insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-server-current.pem\")\n rhn_schema_version = simple_file(\"insights_commands/rhn-schema-version\")\n rhev_data_center = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center\")\n rndc_status = simple_file(\"insights_commands/rndc_status\")\n rpm_V_packages = first_file([\"insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony\", \"insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo\"])\n sap_hdb_version = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version\")\n saphostctl_getcimobject_sapinstance = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance\")\n satellite_content_hosts_count = simple_file(\"insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts\")\n saphostexec_status = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostexec_-status\")\n saphostexec_version = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostexec_-version\")\n satellite_mongodb_storage_engine = simple_file(\"insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine\")\n sealert = simple_file('insights_commands/sealert_-l')\n sestatus = simple_file(\"insights_commands/sestatus_-b\")\n smbstatus_p = simple_file(\"insights_commands/smbstatus_-p\")\n software_collections_list = simple_file('insights_commands/scl_--list')\n spamassassin_channels = simple_file('insights_commands/grep_-r_s_CHANNELURL_.etc.mail.spamassassin.channel.d')\n ss = simple_file(\"insights_commands/ss_-tupna\")\n sshd_config_perms = simple_file(\"insights_commands/ls_-l_.etc.ssh.sshd_config\")\n subscription_manager_id = simple_file(\"insights_commands/subscription-manager_identity\")\n subscription_manager_installed_product_ids = simple_file(\"insights_commands/find_.etc.pki.product-default._.etc.pki.product._-name_pem_-exec_rct_cat-cert_--no-content\")\n sysctl = 
simple_file(\"insights_commands/sysctl_-a\")\n systemctl_cat_rpcbind_socket = simple_file(\"insights_commands/systemctl_cat_rpcbind.socket\")\n systemctl_cinder_volume = simple_file(\"insights_commands/systemctl_show_openstack-cinder-volume\")\n systemctl_httpd = simple_file(\"insights_commands/systemctl_show_httpd\")\n systemctl_nginx = simple_file(\"insights_commands/systemctl_show_nginx\")\n systemctl_list_unit_files = simple_file(\"insights_commands/systemctl_list-unit-files\")\n systemctl_list_units = simple_file(\"insights_commands/systemctl_list-units\")\n systemctl_mariadb = simple_file(\"insights_commands/systemctl_show_mariadb\")\n systemctl_qpidd = simple_file(\"insights_commands/systemctl_show_qpidd\")\n systemctl_qdrouterd = simple_file(\"insights_commands/systemctl_show_qdrouterd\")\n systemctl_show_all_services = simple_file(\"insights_commands/systemctl_show_.service\")\n systemctl_show_target = simple_file(\"insights_commands/systemctl_show_.target\")\n systemctl_smartpdc = simple_file(\"insights_commands/systemctl_show_smart_proxy_dynflow_core\")\n systemd_analyze_blame = simple_file(\"insights_commands/systemd-analyze_blame\")\n systemd_docker = first_file([\"insights_commands/systemctl_cat_docker.service\", \"/usr/lib/systemd/system/docker.service\"])\n systemd_openshift_node = first_file([\"insights_commands/systemctl_cat_atomic-openshift-node.service\", \"/usr/lib/systemd/system/atomic-openshift-node.service\"])\n systool_b_scsi_v = simple_file(\"insights_commands/systool_-b_scsi_-v\")\n testparm_s = simple_file(\"insights_commands/testparm_-s\")\n testparm_v_s = simple_file(\"insights_commands/testparm_-v_-s\")\n tomcat_vdc_fallback = simple_file(\"insights_commands/find_.usr.share_-maxdepth_1_-name_tomcat_-exec_.bin.grep_-R_-s_VirtualDirContext_--include_.xml\")\n tuned_adm = simple_file(\"insights_commands/tuned-adm_list\")\n uname = simple_file(\"insights_commands/uname_-a\")\n uptime = simple_file(\"insights_commands/uptime\")\n version_info = simple_file(\"version_info\")\n vdo_status = simple_file(\"insights_commands/vdo_status\")\n vgdisplay = simple_file(\"insights_commands/vgdisplay\")\n vgs_noheadings = simple_file(\"insights_commands/vgs_--nameprefixes_--noheadings_--separator_-a_-o_vg_all_--config_global_locking_type_0\")\n virsh_list_all = simple_file(\"insights_commands/virsh_--readonly_list_--all\")\n virt_what = simple_file(\"insights_commands/virt-what\")\n yum_list_available = simple_file(\"insights_commands/yum_-C_--noplugins_list_available\")\n yum_repolist = first_file([\"insights_commands/yum_-C_--noplugins_repolist\", \"insights_commands/yum_-C_repolist\"])\n", "path": "insights/specs/insights_archive.py" } ]
[ { "content": "from insights.core.spec_factory import glob_file, simple_file, head, first_file\nfrom functools import partial\nfrom insights.core.context import HostArchiveContext\nfrom insights.specs import Specs\n\nsimple_file = partial(simple_file, context=HostArchiveContext)\nglob_file = partial(glob_file, context=HostArchiveContext)\nfirst_file = partial(first_file, context=HostArchiveContext)\n\n\nclass InsightsArchiveSpecs(Specs):\n\n abrt_status_bare = simple_file(\"insights_commands/abrt_status_--bare_True\")\n all_installed_rpms = glob_file(\"insights_commands/rpm_-qa*\")\n alternatives_display_python = simple_file(\"insights_commands/alternatives_--display_python\")\n auditctl_status = simple_file(\"insights_commands/auditctl_-s\")\n aws_instance_id_doc = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc\")\n aws_instance_id_pkcs7 = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7\")\n awx_manage_check_license = simple_file(\"insights_commands/awx-manage_check_license\")\n azure_instance_type = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type\")\n bios_uuid = simple_file(\"insights_commands/dmidecode_-s_system-uuid\")\n blkid = simple_file(\"insights_commands/blkid_-c_.dev.null\")\n brctl_show = simple_file(\"insights_commands/brctl_show\")\n ceph_df_detail = first_file([\"insights_commands/ceph_df_detail_-f_json-pretty\", \"insights_commands/ceph_df_detail_-f_json\"])\n ceph_health_detail = first_file([\"insights_commands/ceph_health_detail_-f_json-pretty\", \"insights_commands/ceph_health_detail_-f_json\"])\n ceph_insights = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_ceph_insights\")\n ceph_osd_dump = first_file([\"insights_commands/ceph_osd_dump_-f_json-pretty\", \"insights_commands/ceph_osd_dump_-f_json\"])\n ceph_osd_tree = first_file([\"insights_commands/ceph_osd_tree_-f_json-pretty\", \"insights_commands/ceph_osd_tree_-f_json\"])\n ceph_s = first_file([\"insights_commands/ceph_-s_-f_json-pretty\", \"insights_commands/ceph_-s_-f_json\"])\n ceph_v = simple_file(\"insights_commands/ceph_-v\")\n certificates_enddate = first_file([\"insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName\", \"insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName\"])\n chkconfig = simple_file(\"insights_commands/chkconfig_--list\")\n chronyc_sources = simple_file(\"insights_commands/chronyc_sources\")\n corosync_cmapctl = glob_file(\"insights_commands/corosync-cmapctl*\")\n cpupower_frequency_info = simple_file(\"insights_commands/cpupower_-c_all_frequency-info\")\n date = simple_file(\"insights_commands/date\")\n date_utc = simple_file(\"insights_commands/date_--utc\")\n df__al = first_file([\"insights_commands/df_-al_-x_autofs\", \"insights_commands/df_-al\"])\n df__alP = first_file([\"insights_commands/df_-alP_-x_autofs\", \"insights_commands/df_-alP\"])\n df__li = first_file([\"insights_commands/df_-li_-x_autofs\", \"insights_commands/df_-li\"])\n dig_dnssec = simple_file(\"insights_commands/dig_dnssec_._SOA\")\n dig_edns = simple_file(\"insights_commands/dig_edns_0_._SOA\")\n dig_noedns = simple_file(\"insights_commands/dig_noedns_._SOA\")\n display_name = simple_file(\"display_name\")\n dmesg = simple_file(\"insights_commands/dmesg\")\n 
dmidecode = simple_file(\"insights_commands/dmidecode\")\n dmsetup_info = simple_file(\"insights_commands/dmsetup_info_-C\")\n docker_info = simple_file(\"insights_commands/docker_info\")\n docker_list_containers = simple_file(\"insights_commands/docker_ps_--all_--no-trunc\")\n docker_list_images = simple_file(\"insights_commands/docker_images_--all_--no-trunc_--digests\")\n dotnet_version = simple_file(\"insights_commands/dotnet_--version\")\n doveconf = simple_file(\"insights_commands/doveconf\")\n du_dirs = glob_file(\"insights_commands/du_-s_-k_*\")\n engine_db_query_vdsm_version = simple_file(\"insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json\")\n ethtool = glob_file(\"insights_commands/ethtool_*\", ignore=\"ethtool_-.*\")\n ethtool_S = glob_file(\"insights_commands/ethtool_-S_*\")\n ethtool_T = glob_file(\"insights_commands/ethtool_-T_*\")\n ethtool_c = glob_file(\"insights_commands/ethtool_-c_*\")\n ethtool_g = glob_file(\"insights_commands/ethtool_-g_*\")\n ethtool_i = glob_file(\"insights_commands/ethtool_-i_*\")\n ethtool_k = glob_file(\"insights_commands/ethtool_-k_*\")\n facter = simple_file(\"insights_commands/facter\")\n fc_match = simple_file(\"insights_commands/fc-match_-sv_sans_regular_roman_family_fontformat\")\n fcoeadm_i = simple_file(\"insights_commands/fcoeadm_-i\")\n findmnt_lo_propagation = simple_file(\"insights_commands/findmnt_-lo_PROPAGATION\")\n firewall_cmd_list_all_zones = simple_file(\"insights_commands/firewall-cmd_--list-all-zones\")\n getconf_page_size = simple_file(\"insights_commands/getconf_PAGE_SIZE\")\n getenforce = simple_file(\"insights_commands/getenforce\")\n getsebool = simple_file(\"insights_commands/getsebool_-a\")\n grub1_config_perms = simple_file(\"insights_commands/ls_-l_.boot.grub.grub.conf\")\n grub_config_perms = simple_file(\"insights_commands/ls_-l_.boot.grub2.grub.cfg\")\n grubby_default_index = simple_file(\"insights_commands/grubby_--default-index\")\n grubby_default_kernel = simple_file(\"insights_commands/grubby_--default-kernel\")\n gluster_v_info = simple_file(\"insights_commands/gluster_volume_info\")\n hammer_task_list = simple_file(\"insights_commands/hammer_--config_.root..hammer.cli.modules.d.foreman.yml_--output_csv_task_list_--search_state_running_AND_label_Actions_Candlepin_ListenOnCandlepinEvents_OR_label_Actions_Katello_EventQueue_Monitor\")\n installed_rpms = head(all_installed_rpms)\n hostname = simple_file(\"insights_commands/hostname_-f\")\n hostname_default = simple_file(\"insights_commands/hostname\")\n hostname_short = simple_file(\"insights_commands/hostname_-s\")\n httpd_M = glob_file(\"insights_commands/*httpd*_-M\")\n httpd_on_nfs = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_httpd_on_nfs\")\n httpd_V = glob_file(\"insights_commands/*httpd*_-V\")\n initctl_lst = simple_file(\"insights_commands/initctl_--system_list\")\n ip6tables = simple_file(\"insights_commands/ip6tables-save\")\n ip_addr = simple_file(\"insights_commands/ip_addr\")\n ip_addresses = simple_file(\"insights_commands/hostname_-I\")\n ip_route_show_table_all = simple_file(\"insights_commands/ip_route_show_table_all\")\n ip_s_link = first_file([\"insights_commands/ip_-s_-d_link\", \"insights_commands/ip_-s_link\"])\n ipcs_m = simple_file(\"insights_commands/ipcs_-m\")\n ipcs_m_p = simple_file(\"insights_commands/ipcs_-m_-p\")\n ipcs_s = simple_file(\"insights_commands/ipcs_-s\")\n iptables = 
simple_file(\"insights_commands/iptables-save\")\n ipv4_neigh = simple_file(\"insights_commands/ip_-4_neighbor_show_nud_all\")\n ipv6_neigh = simple_file(\"insights_commands/ip_-6_neighbor_show_nud_all\")\n iscsiadm_m_session = simple_file(\"insights_commands/iscsiadm_-m_session\")\n keystone_crontab = simple_file(\"insights_commands/crontab_-l_-u_keystone\")\n kpatch_list = simple_file(\"insights_commands/kpatch_list\")\n localtime = simple_file(\"insights_commands/file_-L_.etc.localtime\")\n lpstat_p = simple_file(\"insights_commands/lpstat_-p\")\n ls_boot = simple_file(\"insights_commands/ls_-lanR_.boot\")\n ls_dev = simple_file(\"insights_commands/ls_-lanR_.dev\")\n ls_disk = simple_file(\"insights_commands/ls_-lanR_.dev.disk\")\n ls_edac_mc = simple_file(\"insights_commands/ls_-lan_.sys.devices.system.edac.mc\")\n ls_etc = simple_file(\"insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig\")\n ls_ipa_idoverride_memberof = simple_file(\"insights_commands/ls_-lan_.usr.share.ipa.ui.js.plugins.idoverride-memberof\")\n ls_lib_firmware = simple_file(\"insights_commands/ls_-lanR_.lib.firmware\")\n ls_ocp_cni_openshift_sdn = simple_file(\"insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn\")\n ls_origin_local_volumes_pods = simple_file(\"insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods\")\n ls_osroot = simple_file(\"insights_commands/ls_-lan\")\n ls_run_systemd_generator = simple_file(\"insights_commands/ls_-lan_.run.systemd.generator\")\n ls_R_var_lib_nova_instances = simple_file(\"insights_commands/ls_-laR_.var.lib.nova.instances\")\n ls_sys_firmware = simple_file(\"insights_commands/ls_-lanR_.sys.firmware\")\n ls_tmp = simple_file(\"insights_commands/ls_-la_.tmp\")\n ls_usr_bin = simple_file(\"insights_commands/ls_-lan_.usr.bin\")\n ls_usr_lib64 = simple_file(\"insights_commands/ls_-lan_.usr.lib64\")\n ls_var_lib_mongodb = simple_file(\"insights_commands/ls_-la_.var.lib.mongodb\")\n ls_var_lib_nova_instances = simple_file(\"insights_commands/ls_-laRZ_.var.lib.nova.instances\")\n ls_var_log = simple_file(\"insights_commands/ls_-la_.var.log_.var.log.audit\")\n ls_var_opt_mssql = simple_file(\"insights_commands/ls_-ld_.var.opt.mssql\")\n ls_var_opt_mssql_log = simple_file(\"insights_commands/ls_-la_.var.opt.mssql.log\")\n ls_var_spool_clientmq = simple_file(\"insights_commands/ls_-ln_.var.spool.clientmqueue\")\n ls_var_spool_postfix_maildrop = simple_file(\"insights_commands/ls_-ln_.var.spool.postfix.maildrop\")\n ls_var_tmp = simple_file(\"insights_commands/ls_-ln_.var.tmp\")\n ls_var_run = simple_file(\"insights_commands/ls_-lnL_.var.run\")\n ls_var_www = simple_file(\"insights_commands/ls_-la_.dev.null_.var.www\")\n lsblk = simple_file(\"insights_commands/lsblk\")\n lsblk_pairs = simple_file(\"insights_commands/lsblk_-P_-o_NAME_KNAME_MAJ_MIN_FSTYPE_MOUNTPOINT_LABEL_UUID_RA_RO_RM_MODEL_SIZE_STATE_OWNER_GROUP_MODE_ALIGNMENT_MIN-IO_OPT-IO_PHY-SEC_LOG-SEC_ROTA_SCHED_RQ-SIZE_TYPE_DISC-ALN_DISC-GRAN_DISC-MAX_DISC-ZERO\")\n lscpu = simple_file(\"insights_commands/lscpu\")\n lsmod = simple_file(\"insights_commands/lsmod\")\n lsof = simple_file(\"insights_commands/lsof\")\n lspci = simple_file(\"insights_commands/lspci_-k\")\n lssap = simple_file(\"insights_commands/usr.sap.hostctrl.exe.lssap\")\n lsscsi = simple_file(\"insights_commands/lsscsi\")\n lsvmbus = simple_file(\"insights_commands/lsvmbus_-vv\")\n lvmconfig = first_file([\n 
\"insights_commands/lvmconfig_--type_full\",\n \"insights_commands/lvm_dumpconfig_--type_full\"\n ])\n lvs_noheadings = first_file(\n [\n \"insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_lv_kernel_major_lv_kernel_minor_--config_global_locking_type_0\",\n \"insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0\"\n ]\n )\n max_uid = simple_file(\"insights_commands/awk_-F_if_3_max_max_3_END_print_max_.etc.passwd\")\n md5chk_files = glob_file(\"insights_commands/md5sum_*\")\n mount = simple_file(\"insights_commands/mount\")\n modinfo = glob_file(\"insights_commands/modinfo_*\")\n modinfo_i40e = simple_file(\"insights_commands/modinfo_i40e\")\n modinfo_igb = simple_file(\"insights_commands/modinfo_igb\")\n modinfo_ixgbe = simple_file(\"insights_commands/modinfo_ixgbe\")\n modinfo_veth = simple_file(\"insights_commands/modinfo_veth\")\n modinfo_vmxnet3 = simple_file(\"insights_commands/modinfo_vmxnet3\")\n mokutil_sbstate = simple_file(\"insights_commands/mokutil_--sb-state\")\n multicast_querier = simple_file(\"insights_commands/find_.sys.devices.virtual.net._-name_multicast_querier_-print_-exec_cat\")\n multipath_conf_initramfs = simple_file(\"insights_commands/lsinitrd_-f_.etc.multipath.conf\")\n multipath__v4__ll = simple_file(\"insights_commands/multipath_-v4_-ll\")\n mysqladmin_vars = simple_file(\"insights_commands/mysqladmin_variables\")\n named_checkconf_p = simple_file(\"insights_commands/named-checkconf_-p\")\n ndctl_list_Ni = simple_file(\"insights_commands/ndctl_list_-Ni\")\n netstat = simple_file(\"insights_commands/netstat_-neopa\")\n netstat_agn = simple_file(\"insights_commands/netstat_-agn\")\n netstat_i = simple_file(\"insights_commands/netstat_-i\")\n netstat_s = simple_file(\"insights_commands/netstat_-s\")\n nmcli_conn_show = simple_file(\"insights_commands/nmcli_conn_show\")\n nmcli_dev_show = simple_file(\"insights_commands/nmcli_dev_show\")\n nova_crontab = simple_file(\"insights_commands/crontab_-l_-u_nova\")\n nova_uid = simple_file(\"insights_commands/id_-u_nova\")\n ntpq_leap = simple_file(\"insights_commands/ntpq_-c_rv_0_leap\")\n ntptime = simple_file(\"insights_commands/ntptime\")\n numeric_user_group_name = simple_file(\"insights_commands/grep_-c_digit_.etc.passwd_.etc.group\")\n oc_get_clusterrole_with_config = simple_file(\"insights_commands/oc_get_clusterrole_--config_.etc.origin.master.admin.kubeconfig\")\n oc_get_clusterrolebinding_with_config = simple_file(\"insights_commands/oc_get_clusterrolebinding_--config_.etc.origin.master.admin.kubeconfig\")\n open_vm_tools_stat_raw_text_session = simple_file(\"insights_commands/vmware-toolbox-cmd_stat_raw_text_session\")\n openvswitch_other_config = simple_file(\"insights_commands/ovs-vsctl_-t_5_get_Open_vSwitch_._other_config\")\n ovs_vsctl_list_bridge = simple_file(\"insights_commands/ovs-vsctl_list_bridge\")\n ovs_vsctl_show = simple_file(\"insights_commands/ovs-vsctl_show\")\n package_provides_command = glob_file(\"insights_commands/echo_*java*\")\n passenger_status = simple_file(\"insights_commands/passenger-status\")\n pci_rport_target_disk_paths = simple_file(\"insights_commands/find_.sys.devices._-maxdepth_10_-mindepth_9_-name_stat_-type_f\")\n pcp_metrics = 
simple_file(\"insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5\")\n pcs_quorum_status = simple_file(\"insights_commands/pcs_quorum_status\")\n pcs_status = simple_file(\"insights_commands/pcs_status\")\n postconf_builtin = simple_file(\"insights_commands/postconf_-C_builtin\")\n postconf = simple_file(\"insights_commands/postconf\")\n ps_alxwww = simple_file(\"insights_commands/ps_alxwww\")\n ps_aux = simple_file(\"insights_commands/ps_aux\")\n ps_auxcww = simple_file(\"insights_commands/ps_auxcww\")\n ps_auxww = simple_file(\"insights_commands/ps_auxww\")\n ps_ef = simple_file(\"insights_commands/ps_-ef\")\n ps_eo = simple_file(\"insights_commands/ps_-eo_pid_ppid_comm\")\n puppet_ca_cert_expire_date = simple_file(\"insights_commands/openssl_x509_-in_.etc.puppetlabs.puppet.ssl.ca.ca_crt.pem_-enddate_-noout\")\n pvs_noheadings = simple_file(\"insights_commands/pvs_--nameprefixes_--noheadings_--separator_-a_-o_pv_all_vg_name_--config_global_locking_type_0\")\n qpid_stat_g = simple_file(\"insights_commands/qpid-stat_-g_--ssl-certificate_.etc.pki.katello.qpid_client_striped.crt_-b_amqps_..localhost_5671\")\n rabbitmq_report = simple_file(\"insights_commands/rabbitmqctl_report\")\n rabbitmq_users = simple_file(\"insights_commands/rabbitmqctl_list_users\")\n readlink_e_etc_mtab = simple_file(\"insights_commands/readlink_-e_.etc.mtab\")\n readlink_e_shift_cert_client = simple_file(\"insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-client-current.pem\")\n readlink_e_shift_cert_server = simple_file(\"insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-server-current.pem\")\n rhn_schema_version = simple_file(\"insights_commands/rhn-schema-version\")\n rhev_data_center = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center\")\n rndc_status = simple_file(\"insights_commands/rndc_status\")\n rpm_V_packages = first_file([\"insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony\", \"insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo\"])\n sap_hdb_version = simple_file(\"insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version\")\n saphostctl_getcimobject_sapinstance = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance\")\n satellite_content_hosts_count = simple_file(\"insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts\")\n saphostexec_status = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostexec_-status\")\n saphostexec_version = simple_file(\"insights_commands/usr.sap.hostctrl.exe.saphostexec_-version\")\n satellite_mongodb_storage_engine = simple_file(\"insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine\")\n sealert = simple_file('insights_commands/sealert_-l')\n sestatus = simple_file(\"insights_commands/sestatus_-b\")\n smbstatus_p = simple_file(\"insights_commands/smbstatus_-p\")\n software_collections_list = simple_file('insights_commands/scl_--list')\n spamassassin_channels = simple_file('insights_commands/grep_-r_s_CHANNELURL_.etc.mail.spamassassin.channel.d')\n ss = simple_file(\"insights_commands/ss_-tupna\")\n sshd_config_perms = simple_file(\"insights_commands/ls_-l_.etc.ssh.sshd_config\")\n subscription_manager_id = simple_file(\"insights_commands/subscription-manager_identity\")\n subscription_manager_installed_product_ids = 
simple_file(\"insights_commands/find_.etc.pki.product-default._.etc.pki.product._-name_pem_-exec_rct_cat-cert_--no-content\")\n sysctl = simple_file(\"insights_commands/sysctl_-a\")\n systemctl_cat_rpcbind_socket = simple_file(\"insights_commands/systemctl_cat_rpcbind.socket\")\n systemctl_cinder_volume = simple_file(\"insights_commands/systemctl_show_openstack-cinder-volume\")\n systemctl_httpd = simple_file(\"insights_commands/systemctl_show_httpd\")\n systemctl_nginx = simple_file(\"insights_commands/systemctl_show_nginx\")\n systemctl_list_unit_files = simple_file(\"insights_commands/systemctl_list-unit-files\")\n systemctl_list_units = simple_file(\"insights_commands/systemctl_list-units\")\n systemctl_mariadb = simple_file(\"insights_commands/systemctl_show_mariadb\")\n systemctl_qpidd = simple_file(\"insights_commands/systemctl_show_qpidd\")\n systemctl_qdrouterd = simple_file(\"insights_commands/systemctl_show_qdrouterd\")\n systemctl_show_all_services = simple_file(\"insights_commands/systemctl_show_.service\")\n systemctl_show_target = simple_file(\"insights_commands/systemctl_show_.target\")\n systemctl_smartpdc = simple_file(\"insights_commands/systemctl_show_smart_proxy_dynflow_core\")\n systemd_analyze_blame = simple_file(\"insights_commands/systemd-analyze_blame\")\n systemd_docker = first_file([\"insights_commands/systemctl_cat_docker.service\", \"/usr/lib/systemd/system/docker.service\"])\n systemd_openshift_node = first_file([\"insights_commands/systemctl_cat_atomic-openshift-node.service\", \"/usr/lib/systemd/system/atomic-openshift-node.service\"])\n systool_b_scsi_v = simple_file(\"insights_commands/systool_-b_scsi_-v\")\n testparm_s = simple_file(\"insights_commands/testparm_-s\")\n testparm_v_s = simple_file(\"insights_commands/testparm_-v_-s\")\n tomcat_vdc_fallback = simple_file(\"insights_commands/find_.usr.share_-maxdepth_1_-name_tomcat_-exec_.bin.grep_-R_-s_VirtualDirContext_--include_.xml\")\n tuned_adm = simple_file(\"insights_commands/tuned-adm_list\")\n uname = simple_file(\"insights_commands/uname_-a\")\n uptime = simple_file(\"insights_commands/uptime\")\n version_info = simple_file(\"version_info\")\n vdo_status = simple_file(\"insights_commands/vdo_status\")\n vgdisplay = simple_file(\"insights_commands/vgdisplay\")\n vgs_noheadings = simple_file(\"insights_commands/vgs_--nameprefixes_--noheadings_--separator_-a_-o_vg_all_--config_global_locking_type_0\")\n virsh_list_all = simple_file(\"insights_commands/virsh_--readonly_list_--all\")\n virt_what = simple_file(\"insights_commands/virt-what\")\n yum_list_available = simple_file(\"insights_commands/yum_-C_--noplugins_list_available\")\n yum_repolist = first_file([\"insights_commands/yum_-C_--noplugins_repolist\", \"insights_commands/yum_-C_repolist\"])\n", "path": "insights/specs/insights_archive.py" } ]
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 068cf5af35..2e88c7ac4d 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -5,6 +5,7 @@ simple_file = partial(simple_file, context=HostArchiveContext) glob_file = partial(glob_file, context=HostArchiveContext) +first_file = partial(first_file, context=HostArchiveContext) class InsightsArchiveSpecs(Specs):
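The fix above hinges on `functools.partial`: binding `context=HostArchiveContext` once so that every spec declared with `first_file` (just like `simple_file` and `glob_file` before it) resolves against the host-archive context. Below is a minimal standalone sketch of that pattern; `spec` and `archive_spec` are hypothetical stand-ins, not insights-core's real factories.

```python
# Minimal sketch of the partial-binding pattern above; `spec` is a hypothetical
# stand-in for insights-core's simple_file/glob_file/first_file factories.
from functools import partial


def spec(path, context=None):
    # Record which collection context a path should be read from.
    return {"path": path, "context": context}


# Bind the keyword argument once; later declarations no longer need to repeat it.
archive_spec = partial(spec, context="HostArchiveContext")

print(archive_spec("insights_commands/hostname"))
# -> {'path': 'insights_commands/hostname', 'context': 'HostArchiveContext'}
```

Forgetting to rebind one of the factories (as happened with `first_file` in the before-state) silently leaves those specs on the default context, which is exactly what the one-line patch corrects.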
matrix-org__synapse-13326
Ubuntu 21.10 (Impish Indri) reached end of life on July 14, 2022; see https://lists.ubuntu.com/archives/ubuntu-announce/2022-July/000281.html I don't think we have good docs for removing a distribution, but it should be the opposite of [gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution](https://gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution).
[ { "content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images.\n#\n# By default, builds for all known distributions, but a list of distributions\n# can be passed on the commandline for debugging.\n\nimport argparse\nimport json\nimport os\nimport signal\nimport subprocess\nimport sys\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nfrom types import FrameType\nfrom typing import Collection, Optional, Sequence, Set\n\nDISTS = (\n \"debian:buster\", # oldstable: EOL 2022-08\n \"debian:bullseye\",\n \"debian:bookworm\",\n \"debian:sid\",\n \"ubuntu:focal\", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)\n \"ubuntu:impish\", # 21.10 (EOL 2022-07)\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04)\n)\n\nDESC = \"\"\"\\\nBuilds .debs for synapse, using a Docker image for the build environment.\n\nBy default, builds for all known distributions, but a list of distributions\ncan be passed on the commandline for debugging.\n\"\"\"\n\nprojdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass Builder(object):\n def __init__(\n self,\n redirect_stdout: bool = False,\n docker_build_args: Optional[Sequence[str]] = None,\n ):\n self.redirect_stdout = redirect_stdout\n self._docker_build_args = tuple(docker_build_args or ())\n self.active_containers: Set[str] = set()\n self._lock = threading.Lock()\n self._failed = False\n\n def run_build(self, dist: str, skip_tests: bool = False) -> None:\n \"\"\"Build deb for a single distribution\"\"\"\n\n if self._failed:\n print(\"not building %s due to earlier failure\" % (dist,))\n raise Exception(\"failed\")\n\n try:\n self._inner_build(dist, skip_tests)\n except Exception as e:\n print(\"build of %s failed: %s\" % (dist, e), file=sys.stderr)\n self._failed = True\n raise\n\n def _inner_build(self, dist: str, skip_tests: bool = False) -> None:\n tag = dist.split(\":\", 1)[1]\n\n # Make the dir where the debs will live.\n #\n # Note that we deliberately put this outside the source tree, otherwise\n # we tend to get source packages which are full of debs. 
(We could hack\n # around that with more magic in the build_debian.sh script, but that\n # doesn't solve the problem for natively-run dpkg-buildpakage).\n debsdir = os.path.join(projdir, \"../debs\")\n os.makedirs(debsdir, exist_ok=True)\n\n if self.redirect_stdout:\n logfile = os.path.join(debsdir, \"%s.buildlog\" % (tag,))\n print(\"building %s: directing output to %s\" % (dist, logfile))\n stdout = open(logfile, \"w\")\n else:\n stdout = None\n\n # first build a docker image for the build environment\n build_args = (\n (\n \"docker\",\n \"build\",\n \"--tag\",\n \"dh-venv-builder:\" + tag,\n \"--build-arg\",\n \"distro=\" + dist,\n \"-f\",\n \"docker/Dockerfile-dhvirtualenv\",\n )\n + self._docker_build_args\n + (\"docker\",)\n )\n\n subprocess.check_call(\n build_args,\n stdout=stdout,\n stderr=subprocess.STDOUT,\n cwd=projdir,\n )\n\n container_name = \"synapse_build_\" + tag\n with self._lock:\n self.active_containers.add(container_name)\n\n # then run the build itself\n subprocess.check_call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--name\",\n container_name,\n \"--volume=\" + projdir + \":/synapse/source:ro\",\n \"--volume=\" + debsdir + \":/debs\",\n \"-e\",\n \"TARGET_USERID=%i\" % (os.getuid(),),\n \"-e\",\n \"TARGET_GROUPID=%i\" % (os.getgid(),),\n \"-e\",\n \"DEB_BUILD_OPTIONS=%s\" % (\"nocheck\" if skip_tests else \"\"),\n \"dh-venv-builder:\" + tag,\n ],\n stdout=stdout,\n stderr=subprocess.STDOUT,\n )\n\n with self._lock:\n self.active_containers.remove(container_name)\n\n if stdout is not None:\n stdout.close()\n print(\"Completed build of %s\" % (dist,))\n\n def kill_containers(self) -> None:\n with self._lock:\n active = list(self.active_containers)\n\n for c in active:\n print(\"killing container %s\" % (c,))\n subprocess.run(\n [\n \"docker\",\n \"kill\",\n c,\n ],\n stdout=subprocess.DEVNULL,\n )\n with self._lock:\n self.active_containers.remove(c)\n\n\ndef run_builds(\n builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False\n) -> None:\n def sig(signum: int, _frame: Optional[FrameType]) -> None:\n print(\"Caught SIGINT\")\n builder.kill_containers()\n\n signal.signal(signal.SIGINT, sig)\n\n with ThreadPoolExecutor(max_workers=jobs) as e:\n res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)\n\n # make sure we consume the iterable so that exceptions are raised.\n for _ in res:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=DESC,\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n default=1,\n help=\"specify the number of builds to run in parallel\",\n )\n parser.add_argument(\n \"--no-check\",\n action=\"store_true\",\n help=\"skip running tests after building\",\n )\n parser.add_argument(\n \"--docker-build-arg\",\n action=\"append\",\n help=\"specify an argument to pass to docker build\",\n )\n parser.add_argument(\n \"--show-dists-json\",\n action=\"store_true\",\n help=\"instead of building the packages, just list the dists to build for, as a json array\",\n )\n parser.add_argument(\n \"dist\",\n nargs=\"*\",\n default=DISTS,\n help=\"a list of distributions to build for. Default: %(default)s\",\n )\n args = parser.parse_args()\n if args.show_dists_json:\n print(json.dumps(DISTS))\n else:\n builder = Builder(\n redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg\n )\n run_builds(\n builder,\n dists=args.dist,\n jobs=args.jobs,\n skip_tests=args.no_check,\n )\n", "path": "scripts-dev/build_debian_packages.py" } ]
[ { "content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images.\n#\n# By default, builds for all known distributions, but a list of distributions\n# can be passed on the commandline for debugging.\n\nimport argparse\nimport json\nimport os\nimport signal\nimport subprocess\nimport sys\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nfrom types import FrameType\nfrom typing import Collection, Optional, Sequence, Set\n\nDISTS = (\n \"debian:buster\", # oldstable: EOL 2022-08\n \"debian:bullseye\",\n \"debian:bookworm\",\n \"debian:sid\",\n \"ubuntu:focal\", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04)\n)\n\nDESC = \"\"\"\\\nBuilds .debs for synapse, using a Docker image for the build environment.\n\nBy default, builds for all known distributions, but a list of distributions\ncan be passed on the commandline for debugging.\n\"\"\"\n\nprojdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass Builder(object):\n def __init__(\n self,\n redirect_stdout: bool = False,\n docker_build_args: Optional[Sequence[str]] = None,\n ):\n self.redirect_stdout = redirect_stdout\n self._docker_build_args = tuple(docker_build_args or ())\n self.active_containers: Set[str] = set()\n self._lock = threading.Lock()\n self._failed = False\n\n def run_build(self, dist: str, skip_tests: bool = False) -> None:\n \"\"\"Build deb for a single distribution\"\"\"\n\n if self._failed:\n print(\"not building %s due to earlier failure\" % (dist,))\n raise Exception(\"failed\")\n\n try:\n self._inner_build(dist, skip_tests)\n except Exception as e:\n print(\"build of %s failed: %s\" % (dist, e), file=sys.stderr)\n self._failed = True\n raise\n\n def _inner_build(self, dist: str, skip_tests: bool = False) -> None:\n tag = dist.split(\":\", 1)[1]\n\n # Make the dir where the debs will live.\n #\n # Note that we deliberately put this outside the source tree, otherwise\n # we tend to get source packages which are full of debs. 
(We could hack\n # around that with more magic in the build_debian.sh script, but that\n # doesn't solve the problem for natively-run dpkg-buildpakage).\n debsdir = os.path.join(projdir, \"../debs\")\n os.makedirs(debsdir, exist_ok=True)\n\n if self.redirect_stdout:\n logfile = os.path.join(debsdir, \"%s.buildlog\" % (tag,))\n print(\"building %s: directing output to %s\" % (dist, logfile))\n stdout = open(logfile, \"w\")\n else:\n stdout = None\n\n # first build a docker image for the build environment\n build_args = (\n (\n \"docker\",\n \"build\",\n \"--tag\",\n \"dh-venv-builder:\" + tag,\n \"--build-arg\",\n \"distro=\" + dist,\n \"-f\",\n \"docker/Dockerfile-dhvirtualenv\",\n )\n + self._docker_build_args\n + (\"docker\",)\n )\n\n subprocess.check_call(\n build_args,\n stdout=stdout,\n stderr=subprocess.STDOUT,\n cwd=projdir,\n )\n\n container_name = \"synapse_build_\" + tag\n with self._lock:\n self.active_containers.add(container_name)\n\n # then run the build itself\n subprocess.check_call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--name\",\n container_name,\n \"--volume=\" + projdir + \":/synapse/source:ro\",\n \"--volume=\" + debsdir + \":/debs\",\n \"-e\",\n \"TARGET_USERID=%i\" % (os.getuid(),),\n \"-e\",\n \"TARGET_GROUPID=%i\" % (os.getgid(),),\n \"-e\",\n \"DEB_BUILD_OPTIONS=%s\" % (\"nocheck\" if skip_tests else \"\"),\n \"dh-venv-builder:\" + tag,\n ],\n stdout=stdout,\n stderr=subprocess.STDOUT,\n )\n\n with self._lock:\n self.active_containers.remove(container_name)\n\n if stdout is not None:\n stdout.close()\n print(\"Completed build of %s\" % (dist,))\n\n def kill_containers(self) -> None:\n with self._lock:\n active = list(self.active_containers)\n\n for c in active:\n print(\"killing container %s\" % (c,))\n subprocess.run(\n [\n \"docker\",\n \"kill\",\n c,\n ],\n stdout=subprocess.DEVNULL,\n )\n with self._lock:\n self.active_containers.remove(c)\n\n\ndef run_builds(\n builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False\n) -> None:\n def sig(signum: int, _frame: Optional[FrameType]) -> None:\n print(\"Caught SIGINT\")\n builder.kill_containers()\n\n signal.signal(signal.SIGINT, sig)\n\n with ThreadPoolExecutor(max_workers=jobs) as e:\n res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)\n\n # make sure we consume the iterable so that exceptions are raised.\n for _ in res:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=DESC,\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n default=1,\n help=\"specify the number of builds to run in parallel\",\n )\n parser.add_argument(\n \"--no-check\",\n action=\"store_true\",\n help=\"skip running tests after building\",\n )\n parser.add_argument(\n \"--docker-build-arg\",\n action=\"append\",\n help=\"specify an argument to pass to docker build\",\n )\n parser.add_argument(\n \"--show-dists-json\",\n action=\"store_true\",\n help=\"instead of building the packages, just list the dists to build for, as a json array\",\n )\n parser.add_argument(\n \"dist\",\n nargs=\"*\",\n default=DISTS,\n help=\"a list of distributions to build for. Default: %(default)s\",\n )\n args = parser.parse_args()\n if args.show_dists_json:\n print(json.dumps(DISTS))\n else:\n builder = Builder(\n redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg\n )\n run_builds(\n builder,\n dists=args.dist,\n jobs=args.jobs,\n skip_tests=args.no_check,\n )\n", "path": "scripts-dev/build_debian_packages.py" } ]
diff --git a/changelog.d/13326.removal b/changelog.d/13326.removal new file mode 100644 index 000000000000..8112286671d7 --- /dev/null +++ b/changelog.d/13326.removal @@ -0,0 +1 @@ +Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 38564893e95b..cd2e64b75f9d 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -26,7 +26,6 @@ "debian:bookworm", "debian:sid", "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:impish", # 21.10 (EOL 2022-07) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) )
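For a quick sanity check of the removal, the script's own `--show-dists-json` flag (defined in the argparse setup quoted above) simply prints `json.dumps(DISTS)`. A minimal sketch of what that output looks like once `ubuntu:impish` is gone, with the tuple mirroring `DISTS` from the patched `build_debian_packages.py`:

```python
# Sketch of the --show-dists-json output after dropping "ubuntu:impish";
# the tuple mirrors DISTS from the patched build_debian_packages.py.
import json

DISTS = (
    "debian:buster",
    "debian:bullseye",
    "debian:bookworm",
    "debian:sid",
    "ubuntu:focal",
    "ubuntu:jammy",
)

print(json.dumps(DISTS))
# -> ["debian:buster", "debian:bullseye", "debian:bookworm", "debian:sid", "ubuntu:focal", "ubuntu:jammy"]
```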
twisted__twisted-11838
twisted.web.template._flattenElement spends a significant amount of runtime in typing.py **Describe the incorrect behavior you saw** `_flattenElement` defines a closure `keepGoing` which is used to recursively call `_flattenElement`. `keepGoing`'s signature includes multiple `Callable[…]` annotations. Since the file does not include `from __future__ import annotations`, the type annotations are evaluated at function definition time, which for this nested closure means on every call to `_flattenElement`. `typing.Callable.__getitem__` does multiple `isinstance` checks before deferring to the `@_tp_cache`ed implementation of `Callable.__getitem_inner__`. As a result, evaluating the closure's annotations makes up a significant portion of `_flattenElement`'s runtime. **Describe how to cause this behavior** This behavior was discovered while profiling a documentation build using pydoctor under the austin profiler. Speedscope URL for the profile is here: https://www.speedscope.app/#profileURL=https%3A%2F%2Fgist.githubusercontent.com%2Fdreid%2F197566471f39a96523f5065d19d0bf7f%2Fraw%2F3e7ec92a17bc82d40acceb1e2efcaa3ef7c8ef07%2Ftwisted-austin-trunk.speedscope In the profile you can see that the `inner` function in `_tp_cache` accounts for 1m25s of the total runtime, and `Callable.__getitem__` accounts for 25s of total runtime. ![Screenshot 2023-03-28 at 2 08 01 PM](https://user-images.githubusercontent.com/48695/228367178-36f58e67-e2fe-4c1a-9442-95053c6a449a.png) ![Screenshot 2023-03-28 at 2 07 32 PM](https://user-images.githubusercontent.com/48695/228367094-084ac107-9196-4025-9de4-d278eab8f31f.png) **Describe the correct behavior you'd like to see** A clear and concise description of what you expected to happen, or what you believe should be happening instead. **Testing environment** - Operating System and Version; paste the output of these commands: - on Linux, `uname -a ; cat /etc/lsb-release` - on Windows, `systeminfo | Findstr /i "OS"` - on macOS, `sw_vers` - Twisted version [e.g. 22.2.0] - please paste the output of `twist --version` and `pip --freeze` - Reactor [e.g. select, iocp] **Additional context** Add any other context about the problem here.
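The cost described in this report is easy to reproduce outside Twisted: annotations on a nested `def` are evaluated every time the enclosing function runs, so each `Callable[...]` subscription goes through `typing`'s `__getitem__` machinery per call. Below is a minimal standalone micro-benchmark sketch (not Twisted code) comparing eagerly evaluated annotations with string annotations, which is roughly what `from __future__ import annotations` (PEP 563) or a module-level type alias would buy `_flattenElement`; exact timings vary by Python version, but the eager variant is measurably slower.

```python
# Standalone micro-benchmark: the inner def's annotations are re-evaluated on
# every call of outer_eager(), but stored as plain strings in outer_lazy().
# Note: typing caches the subscription result via _tp_cache, but
# Callable.__getitem__ still runs its isinstance checks on each evaluation,
# which is the overhead the profile above attributes to typing.py.
import timeit
from typing import Callable, Union


def outer_eager() -> None:
    # `inner` is never called; we only measure the cost of defining it.
    def inner(data: Union[bytes, str], escaper: Callable[[Union[bytes, str]], bytes]) -> bytes:
        return escaper(data)


def outer_lazy() -> None:
    # String annotations are stored as-is and never evaluated here.
    def inner(data: "Union[bytes, str]", escaper: "Callable[[Union[bytes, str]], bytes]") -> "bytes":
        return escaper(data)


if __name__ == "__main__":
    print("eager:", timeit.timeit(outer_eager, number=100_000))
    print("lazy: ", timeit.timeit(outer_lazy, number=100_000))
```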
[ { "content": "# -*- test-case-name: twisted.web.test.test_flatten,twisted.web.test.test_template -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nContext-free flattener/serializer for rendering Python objects, possibly\ncomplex or arbitrarily nested, as strings.\n\"\"\"\n\nfrom inspect import iscoroutine\nfrom io import BytesIO\nfrom sys import exc_info\nfrom traceback import extract_tb\nfrom types import GeneratorType\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Generator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n)\n\nfrom twisted.internet.defer import Deferred, ensureDeferred\nfrom twisted.python.compat import nativeString\nfrom twisted.python.failure import Failure\nfrom twisted.web._stan import CDATA, CharRef, Comment, Tag, slot, voidElements\nfrom twisted.web.error import FlattenerError, UnfilledSlot, UnsupportedType\nfrom twisted.web.iweb import IRenderable, IRequest\n\nT = TypeVar(\"T\")\n\nFlattenableRecursive = Any\n\"\"\"\nFor documentation purposes, read C{FlattenableRecursive} as L{Flattenable}.\nHowever, since mypy doesn't support recursive type definitions (yet?),\nwe'll put Any in the actual definition.\n\"\"\"\n\nFlattenable = Union[\n bytes,\n str,\n slot,\n CDATA,\n Comment,\n Tag,\n Tuple[FlattenableRecursive, ...],\n List[FlattenableRecursive],\n Generator[FlattenableRecursive, None, None],\n CharRef,\n Deferred[FlattenableRecursive],\n Coroutine[Deferred[FlattenableRecursive], object, FlattenableRecursive],\n IRenderable,\n]\n\"\"\"\nType alias containing all types that can be flattened by L{flatten()}.\n\"\"\"\n\n# The maximum number of bytes to synchronously accumulate in the flattener\n# buffer before delivering them onwards.\nBUFFER_SIZE = 2 ** 16\n\n\ndef escapeForContent(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape some character or UTF-8 byte data for inclusion in an HTML or XML\n document, by replacing metacharacters (C{&<>}) with their entity\n equivalents (C{&amp;&lt;&gt;}).\n\n This is used as an input to L{_flattenElement}'s C{dataEscaper} parameter.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is L{str}, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n data = data.replace(b\"&\", b\"&amp;\").replace(b\"<\", b\"&lt;\").replace(b\">\", b\"&gt;\")\n return data\n\n\ndef attributeEscapingDoneOutside(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape some character or UTF-8 byte data for inclusion in the top level of\n an attribute. 
L{attributeEscapingDoneOutside} actually passes the data\n through unchanged, because L{writeWithAttributeEscaping} handles the\n quoting of the text within attributes outside the generator returned by\n L{_flattenElement}; this is used as the C{dataEscaper} argument to that\n L{_flattenElement} call so that that generator does not redundantly escape\n its text output.\n\n @param data: The string to escape.\n\n @return: The string, unchanged, except for encoding.\n \"\"\"\n if isinstance(data, str):\n return data.encode(\"utf-8\")\n return data\n\n\ndef writeWithAttributeEscaping(\n write: Callable[[bytes], object]\n) -> Callable[[bytes], None]:\n \"\"\"\n Decorate a C{write} callable so that all output written is properly quoted\n for inclusion within an XML attribute value.\n\n If a L{Tag <twisted.web.template.Tag>} C{x} is flattened within the context\n of the contents of another L{Tag <twisted.web.template.Tag>} C{y}, the\n metacharacters (C{<>&\"}) delimiting C{x} should be passed through\n unchanged, but the textual content of C{x} should still be quoted, as\n usual. For example: C{<y><x>&amp;</x></y>}. That is the default behavior\n of L{_flattenElement} when L{escapeForContent} is passed as the\n C{dataEscaper}.\n\n However, when a L{Tag <twisted.web.template.Tag>} C{x} is flattened within\n the context of an I{attribute} of another L{Tag <twisted.web.template.Tag>}\n C{y}, then the metacharacters delimiting C{x} should be quoted so that it\n can be parsed from the attribute's value. In the DOM itself, this is not a\n valid thing to do, but given that renderers and slots may be freely moved\n around in a L{twisted.web.template} template, it is a condition which may\n arise in a document and must be handled in a way which produces valid\n output. So, for example, you should be able to get C{<y attr=\"&lt;x /&gt;\"\n />}. This should also be true for other XML/HTML meta-constructs such as\n comments and CDATA, so if you were to serialize a L{comment\n <twisted.web.template.Comment>} in an attribute you should get C{<y\n attr=\"&lt;-- comment --&gt;\" />}. Therefore in order to capture these\n meta-characters, flattening is done with C{write} callable that is wrapped\n with L{writeWithAttributeEscaping}.\n\n The final case, and hopefully the much more common one as compared to\n serializing L{Tag <twisted.web.template.Tag>} and arbitrary L{IRenderable}\n objects within an attribute, is to serialize a simple string, and those\n should be passed through for L{writeWithAttributeEscaping} to quote\n without applying a second, redundant level of quoting.\n\n @param write: A callable which will be invoked with the escaped L{bytes}.\n\n @return: A callable that writes data with escaping.\n \"\"\"\n\n def _write(data: bytes) -> None:\n write(escapeForContent(data).replace(b'\"', b\"&quot;\"))\n\n return _write\n\n\ndef escapedCDATA(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape CDATA for inclusion in a document.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is unicode, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n return data.replace(b\"]]>\", b\"]]]]><![CDATA[>\")\n\n\ndef escapedComment(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Within comments the sequence C{-->} can be mistaken as the end of the comment.\n To ensure consistent parsing and valid output the sequence is replaced with C{--&gt;}.\n Furthermore, whitespace is added when a comment ends in a dash. 
This is done to break\n the connection of the ending C{-} with the closing C{-->}.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is unicode, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n data = data.replace(b\"-->\", b\"--&gt;\")\n if data and data[-1:] == b\"-\":\n data += b\" \"\n return data\n\n\ndef _getSlotValue(\n name: str,\n slotData: Sequence[Optional[Mapping[str, Flattenable]]],\n default: Optional[Flattenable] = None,\n) -> Flattenable:\n \"\"\"\n Find the value of the named slot in the given stack of slot data.\n \"\"\"\n for slotFrame in slotData[::-1]:\n if slotFrame is not None and name in slotFrame:\n return slotFrame[name]\n else:\n if default is not None:\n return default\n raise UnfilledSlot(name)\n\n\ndef _fork(d: Deferred[T]) -> Deferred[T]:\n \"\"\"\n Create a new L{Deferred} based on C{d} that will fire and fail with C{d}'s\n result or error, but will not modify C{d}'s callback type.\n \"\"\"\n d2: Deferred[T] = Deferred(lambda _: d.cancel())\n\n def callback(result: T) -> T:\n d2.callback(result)\n return result\n\n def errback(failure: Failure) -> Failure:\n d2.errback(failure)\n return failure\n\n d.addCallbacks(callback, errback)\n return d2\n\n\ndef _flattenElement(\n request: Optional[IRequest],\n root: Flattenable,\n write: Callable[[bytes], object],\n slotData: List[Optional[Mapping[str, Flattenable]]],\n renderFactory: Optional[IRenderable],\n dataEscaper: Callable[[Union[bytes, str]], bytes],\n # This is annotated as Generator[T, None, None] instead of Iterator[T]\n # because mypy does not consider an Iterator to be an instance of\n # GeneratorType.\n) -> Generator[Union[Generator, Deferred[Flattenable]], None, None]:\n \"\"\"\n Make C{root} slightly more flat by yielding all its immediate contents as\n strings, deferreds or generators that are recursive calls to itself.\n\n @param request: A request object which will be passed to\n L{IRenderable.render}.\n\n @param root: An object to be made flatter. This may be of type C{unicode},\n L{str}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple}, L{list},\n L{types.GeneratorType}, L{Deferred}, or an object that implements\n L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @param slotData: A L{list} of L{dict} mapping L{str} slot names to data\n with which those slots will be replaced.\n\n @param renderFactory: If not L{None}, an object that provides\n L{IRenderable}.\n\n @param dataEscaper: A 1-argument callable which takes L{bytes} or\n L{unicode} and returns L{bytes}, quoted as appropriate for the\n rendering context. This is really only one of two values:\n L{attributeEscapingDoneOutside} or L{escapeForContent}, depending on\n whether the rendering context is within an attribute or not. See the\n explanation in L{writeWithAttributeEscaping}.\n\n @return: An iterator that eventually writes L{bytes} to C{write}.\n It can yield other iterators or L{Deferred}s; if it yields another\n iterator, the caller will iterate it; if it yields a L{Deferred},\n the result of that L{Deferred} will be another generator, in which\n case it is iterated. 
See L{_flattenTree} for the trampoline that\n consumes said values.\n \"\"\"\n\n def keepGoing(\n newRoot: Flattenable,\n dataEscaper: Callable[[Union[bytes, str]], bytes] = dataEscaper,\n renderFactory: Optional[IRenderable] = renderFactory,\n write: Callable[[bytes], object] = write,\n ) -> Generator[Union[Flattenable, Deferred[Flattenable]], None, None]:\n return _flattenElement(\n request, newRoot, write, slotData, renderFactory, dataEscaper\n )\n\n def keepGoingAsync(result: Deferred[Flattenable]) -> Deferred[Flattenable]:\n return result.addCallback(keepGoing)\n\n if isinstance(root, (bytes, str)):\n write(dataEscaper(root))\n elif isinstance(root, slot):\n slotValue = _getSlotValue(root.name, slotData, root.default)\n yield keepGoing(slotValue)\n elif isinstance(root, CDATA):\n write(b\"<![CDATA[\")\n write(escapedCDATA(root.data))\n write(b\"]]>\")\n elif isinstance(root, Comment):\n write(b\"<!--\")\n write(escapedComment(root.data))\n write(b\"-->\")\n elif isinstance(root, Tag):\n slotData.append(root.slotData)\n rendererName = root.render\n if rendererName is not None:\n if renderFactory is None:\n raise ValueError(\n f'Tag wants to be rendered by method \"{rendererName}\" '\n f\"but is not contained in any IRenderable\"\n )\n rootClone = root.clone(False)\n rootClone.render = None\n renderMethod = renderFactory.lookupRenderMethod(rendererName)\n result = renderMethod(request, rootClone)\n yield keepGoing(result)\n slotData.pop()\n return\n\n if not root.tagName:\n yield keepGoing(root.children)\n return\n\n write(b\"<\")\n if isinstance(root.tagName, str):\n tagName = root.tagName.encode(\"ascii\")\n else:\n tagName = root.tagName\n write(tagName)\n for k, v in root.attributes.items():\n if isinstance(k, str):\n k = k.encode(\"ascii\")\n write(b\" \" + k + b'=\"')\n # Serialize the contents of the attribute, wrapping the results of\n # that serialization so that _everything_ is quoted.\n yield keepGoing(\n v, attributeEscapingDoneOutside, write=writeWithAttributeEscaping(write)\n )\n write(b'\"')\n if root.children or nativeString(tagName) not in voidElements:\n write(b\">\")\n # Regardless of whether we're in an attribute or not, switch back\n # to the escapeForContent dataEscaper. 
The contents of a tag must\n # be quoted no matter what; in the top-level document, just so\n # they're valid, and if they're within an attribute, they have to\n # be quoted so that after applying the *un*-quoting required to re-\n # parse the tag within the attribute, all the quoting is still\n # correct.\n yield keepGoing(root.children, escapeForContent)\n write(b\"</\" + tagName + b\">\")\n else:\n write(b\" />\")\n\n elif isinstance(root, (tuple, list, GeneratorType)):\n for element in root:\n yield keepGoing(element)\n elif isinstance(root, CharRef):\n escaped = \"&#%d;\" % (root.ordinal,)\n write(escaped.encode(\"ascii\"))\n elif isinstance(root, Deferred):\n yield keepGoingAsync(_fork(root))\n elif iscoroutine(root):\n yield keepGoingAsync(\n Deferred.fromCoroutine(\n cast(Coroutine[Deferred[Flattenable], object, Flattenable], root)\n )\n )\n elif IRenderable.providedBy(root):\n result = root.render(request)\n yield keepGoing(result, renderFactory=root)\n else:\n raise UnsupportedType(root)\n\n\nasync def _flattenTree(\n request: Optional[IRequest], root: Flattenable, write: Callable[[bytes], object]\n) -> None:\n \"\"\"\n Make C{root} into an iterable of L{bytes} and L{Deferred} by doing a depth\n first traversal of the tree.\n\n @param request: A request object which will be passed to\n L{IRenderable.render}.\n\n @param root: An object to be made flatter. This may be of type C{unicode},\n L{bytes}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple},\n L{list}, L{types.GeneratorType}, L{Deferred}, or something providing\n L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @return: A C{Deferred}-returning coroutine that resolves to C{None}.\n \"\"\"\n buf = []\n bufSize = 0\n\n # Accumulate some bytes up to the buffer size so that we don't annoy the\n # upstream writer with a million tiny string.\n def bufferedWrite(bs: bytes) -> None:\n nonlocal bufSize\n buf.append(bs)\n bufSize += len(bs)\n if bufSize >= BUFFER_SIZE:\n flushBuffer()\n\n # Deliver the buffered content to the upstream writer as a single string.\n # This is how a \"big enough\" buffer gets delivered, how a buffer of any\n # size is delivered before execution is suspended to wait for an\n # asynchronous value, and how anything left in the buffer when we're\n # finished is delivered.\n def flushBuffer() -> None:\n nonlocal bufSize\n if bufSize > 0:\n write(b\"\".join(buf))\n del buf[:]\n bufSize = 0\n\n stack: List[Generator] = [\n _flattenElement(request, root, bufferedWrite, [], None, escapeForContent)\n ]\n\n while stack:\n try:\n frame = stack[-1].gi_frame\n element = next(stack[-1])\n if isinstance(element, Deferred):\n # Before suspending flattening for an unknown amount of time,\n # flush whatever data we have collected so far.\n flushBuffer()\n element = await element\n except StopIteration:\n stack.pop()\n except Exception as e:\n stack.pop()\n roots = []\n for generator in stack:\n roots.append(generator.gi_frame.f_locals[\"root\"])\n roots.append(frame.f_locals[\"root\"])\n raise FlattenerError(e, roots, extract_tb(exc_info()[2]))\n else:\n stack.append(element)\n\n # Flush any data that remains in the buffer before finishing.\n flushBuffer()\n\n\ndef flatten(\n request: Optional[IRequest], root: Flattenable, write: Callable[[bytes], object]\n) -> Deferred[None]:\n \"\"\"\n Incrementally write out a string representation of C{root} using C{write}.\n\n In order to create a string representation, C{root} will be decomposed into\n 
simpler objects which will themselves be decomposed and so on until strings\n or objects which can easily be converted to strings are encountered.\n\n @param request: A request object which will be passed to the C{render}\n method of any L{IRenderable} provider which is encountered.\n\n @param root: An object to be made flatter. This may be of type L{str},\n L{bytes}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple},\n L{list}, L{types.GeneratorType}, L{Deferred}, or something that\n provides L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @return: A L{Deferred} which will be called back with C{None} when C{root}\n has been completely flattened into C{write} or which will be errbacked\n if an unexpected exception occurs.\n \"\"\"\n return ensureDeferred(_flattenTree(request, root, write))\n\n\ndef flattenString(request: Optional[IRequest], root: Flattenable) -> Deferred[bytes]:\n \"\"\"\n Collate a string representation of C{root} into a single string.\n\n This is basically gluing L{flatten} to an L{io.BytesIO} and returning\n the results. See L{flatten} for the exact meanings of C{request} and\n C{root}.\n\n @return: A L{Deferred} which will be called back with a single UTF-8 encoded\n string as its result when C{root} has been completely flattened or which\n will be errbacked if an unexpected exception occurs.\n \"\"\"\n io = BytesIO()\n d = flatten(request, root, io.write)\n d.addCallback(lambda _: io.getvalue())\n return cast(Deferred[bytes], d)\n", "path": "src/twisted/web/_flatten.py" } ]
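The escaping helpers in the `_flatten.py` listing above (`escapeForContent`, `escapedCDATA`, `escapedComment`) are easiest to follow with concrete inputs. A minimal illustrative sketch (not from the Twisted source above), assuming only that the module shown above is importable as `twisted.web._flatten`:

```python
# Illustrative only: exercises the escaping helpers from the listing above.
from twisted.web._flatten import escapeForContent, escapedCDATA, escapedComment

# Metacharacters become entities; str input is returned UTF-8 encoded.
assert escapeForContent("a < b & c") == b"a &lt; b &amp; c"

# "]]>" would terminate a CDATA section early, so it is split apart.
assert escapedCDATA(b"x]]>y") == b"x]]]]><![CDATA[>y"

# "-->" inside a comment is defused, and a trailing "-" gets a space appended.
assert escapedComment(b"done --> -") == b"done --&gt; - "
```

All three helpers return `bytes`, which is what the buffered `write` callable in `_flattenTree` expects.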
[ { "content": "# -*- test-case-name: twisted.web.test.test_flatten,twisted.web.test.test_template -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nContext-free flattener/serializer for rendering Python objects, possibly\ncomplex or arbitrarily nested, as strings.\n\"\"\"\nfrom __future__ import annotations\n\nfrom inspect import iscoroutine\nfrom io import BytesIO\nfrom sys import exc_info\nfrom traceback import extract_tb\nfrom types import GeneratorType\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Generator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n)\n\nfrom twisted.internet.defer import Deferred, ensureDeferred\nfrom twisted.python.compat import nativeString\nfrom twisted.python.failure import Failure\nfrom twisted.web._stan import CDATA, CharRef, Comment, Tag, slot, voidElements\nfrom twisted.web.error import FlattenerError, UnfilledSlot, UnsupportedType\nfrom twisted.web.iweb import IRenderable, IRequest\n\nT = TypeVar(\"T\")\n\nFlattenableRecursive = Any\n\"\"\"\nFor documentation purposes, read C{FlattenableRecursive} as L{Flattenable}.\nHowever, since mypy doesn't support recursive type definitions (yet?),\nwe'll put Any in the actual definition.\n\"\"\"\n\nFlattenable = Union[\n bytes,\n str,\n slot,\n CDATA,\n Comment,\n Tag,\n Tuple[FlattenableRecursive, ...],\n List[FlattenableRecursive],\n Generator[FlattenableRecursive, None, None],\n CharRef,\n Deferred[FlattenableRecursive],\n Coroutine[Deferred[FlattenableRecursive], object, FlattenableRecursive],\n IRenderable,\n]\n\"\"\"\nType alias containing all types that can be flattened by L{flatten()}.\n\"\"\"\n\n# The maximum number of bytes to synchronously accumulate in the flattener\n# buffer before delivering them onwards.\nBUFFER_SIZE = 2 ** 16\n\n\ndef escapeForContent(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape some character or UTF-8 byte data for inclusion in an HTML or XML\n document, by replacing metacharacters (C{&<>}) with their entity\n equivalents (C{&amp;&lt;&gt;}).\n\n This is used as an input to L{_flattenElement}'s C{dataEscaper} parameter.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is L{str}, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n data = data.replace(b\"&\", b\"&amp;\").replace(b\"<\", b\"&lt;\").replace(b\">\", b\"&gt;\")\n return data\n\n\ndef attributeEscapingDoneOutside(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape some character or UTF-8 byte data for inclusion in the top level of\n an attribute. 
L{attributeEscapingDoneOutside} actually passes the data\n through unchanged, because L{writeWithAttributeEscaping} handles the\n quoting of the text within attributes outside the generator returned by\n L{_flattenElement}; this is used as the C{dataEscaper} argument to that\n L{_flattenElement} call so that that generator does not redundantly escape\n its text output.\n\n @param data: The string to escape.\n\n @return: The string, unchanged, except for encoding.\n \"\"\"\n if isinstance(data, str):\n return data.encode(\"utf-8\")\n return data\n\n\ndef writeWithAttributeEscaping(\n write: Callable[[bytes], object]\n) -> Callable[[bytes], None]:\n \"\"\"\n Decorate a C{write} callable so that all output written is properly quoted\n for inclusion within an XML attribute value.\n\n If a L{Tag <twisted.web.template.Tag>} C{x} is flattened within the context\n of the contents of another L{Tag <twisted.web.template.Tag>} C{y}, the\n metacharacters (C{<>&\"}) delimiting C{x} should be passed through\n unchanged, but the textual content of C{x} should still be quoted, as\n usual. For example: C{<y><x>&amp;</x></y>}. That is the default behavior\n of L{_flattenElement} when L{escapeForContent} is passed as the\n C{dataEscaper}.\n\n However, when a L{Tag <twisted.web.template.Tag>} C{x} is flattened within\n the context of an I{attribute} of another L{Tag <twisted.web.template.Tag>}\n C{y}, then the metacharacters delimiting C{x} should be quoted so that it\n can be parsed from the attribute's value. In the DOM itself, this is not a\n valid thing to do, but given that renderers and slots may be freely moved\n around in a L{twisted.web.template} template, it is a condition which may\n arise in a document and must be handled in a way which produces valid\n output. So, for example, you should be able to get C{<y attr=\"&lt;x /&gt;\"\n />}. This should also be true for other XML/HTML meta-constructs such as\n comments and CDATA, so if you were to serialize a L{comment\n <twisted.web.template.Comment>} in an attribute you should get C{<y\n attr=\"&lt;-- comment --&gt;\" />}. Therefore in order to capture these\n meta-characters, flattening is done with C{write} callable that is wrapped\n with L{writeWithAttributeEscaping}.\n\n The final case, and hopefully the much more common one as compared to\n serializing L{Tag <twisted.web.template.Tag>} and arbitrary L{IRenderable}\n objects within an attribute, is to serialize a simple string, and those\n should be passed through for L{writeWithAttributeEscaping} to quote\n without applying a second, redundant level of quoting.\n\n @param write: A callable which will be invoked with the escaped L{bytes}.\n\n @return: A callable that writes data with escaping.\n \"\"\"\n\n def _write(data: bytes) -> None:\n write(escapeForContent(data).replace(b'\"', b\"&quot;\"))\n\n return _write\n\n\ndef escapedCDATA(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Escape CDATA for inclusion in a document.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is unicode, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n return data.replace(b\"]]>\", b\"]]]]><![CDATA[>\")\n\n\ndef escapedComment(data: Union[bytes, str]) -> bytes:\n \"\"\"\n Within comments the sequence C{-->} can be mistaken as the end of the comment.\n To ensure consistent parsing and valid output the sequence is replaced with C{--&gt;}.\n Furthermore, whitespace is added when a comment ends in a dash. 
This is done to break\n the connection of the ending C{-} with the closing C{-->}.\n\n @param data: The string to escape.\n\n @return: The quoted form of C{data}. If C{data} is unicode, return a utf-8\n encoded string.\n \"\"\"\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n data = data.replace(b\"-->\", b\"--&gt;\")\n if data and data[-1:] == b\"-\":\n data += b\" \"\n return data\n\n\ndef _getSlotValue(\n name: str,\n slotData: Sequence[Optional[Mapping[str, Flattenable]]],\n default: Optional[Flattenable] = None,\n) -> Flattenable:\n \"\"\"\n Find the value of the named slot in the given stack of slot data.\n \"\"\"\n for slotFrame in slotData[::-1]:\n if slotFrame is not None and name in slotFrame:\n return slotFrame[name]\n else:\n if default is not None:\n return default\n raise UnfilledSlot(name)\n\n\ndef _fork(d: Deferred[T]) -> Deferred[T]:\n \"\"\"\n Create a new L{Deferred} based on C{d} that will fire and fail with C{d}'s\n result or error, but will not modify C{d}'s callback type.\n \"\"\"\n d2: Deferred[T] = Deferred(lambda _: d.cancel())\n\n def callback(result: T) -> T:\n d2.callback(result)\n return result\n\n def errback(failure: Failure) -> Failure:\n d2.errback(failure)\n return failure\n\n d.addCallbacks(callback, errback)\n return d2\n\n\ndef _flattenElement(\n request: Optional[IRequest],\n root: Flattenable,\n write: Callable[[bytes], object],\n slotData: List[Optional[Mapping[str, Flattenable]]],\n renderFactory: Optional[IRenderable],\n dataEscaper: Callable[[Union[bytes, str]], bytes],\n # This is annotated as Generator[T, None, None] instead of Iterator[T]\n # because mypy does not consider an Iterator to be an instance of\n # GeneratorType.\n) -> Generator[Union[Generator, Deferred[Flattenable]], None, None]:\n \"\"\"\n Make C{root} slightly more flat by yielding all its immediate contents as\n strings, deferreds or generators that are recursive calls to itself.\n\n @param request: A request object which will be passed to\n L{IRenderable.render}.\n\n @param root: An object to be made flatter. This may be of type C{unicode},\n L{str}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple}, L{list},\n L{types.GeneratorType}, L{Deferred}, or an object that implements\n L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @param slotData: A L{list} of L{dict} mapping L{str} slot names to data\n with which those slots will be replaced.\n\n @param renderFactory: If not L{None}, an object that provides\n L{IRenderable}.\n\n @param dataEscaper: A 1-argument callable which takes L{bytes} or\n L{unicode} and returns L{bytes}, quoted as appropriate for the\n rendering context. This is really only one of two values:\n L{attributeEscapingDoneOutside} or L{escapeForContent}, depending on\n whether the rendering context is within an attribute or not. See the\n explanation in L{writeWithAttributeEscaping}.\n\n @return: An iterator that eventually writes L{bytes} to C{write}.\n It can yield other iterators or L{Deferred}s; if it yields another\n iterator, the caller will iterate it; if it yields a L{Deferred},\n the result of that L{Deferred} will be another generator, in which\n case it is iterated. 
See L{_flattenTree} for the trampoline that\n consumes said values.\n \"\"\"\n\n def keepGoing(\n newRoot: Flattenable,\n dataEscaper: Callable[[Union[bytes, str]], bytes] = dataEscaper,\n renderFactory: Optional[IRenderable] = renderFactory,\n write: Callable[[bytes], object] = write,\n ) -> Generator[Union[Flattenable, Deferred[Flattenable]], None, None]:\n return _flattenElement(\n request, newRoot, write, slotData, renderFactory, dataEscaper\n )\n\n def keepGoingAsync(result: Deferred[Flattenable]) -> Deferred[Flattenable]:\n return result.addCallback(keepGoing)\n\n if isinstance(root, (bytes, str)):\n write(dataEscaper(root))\n elif isinstance(root, slot):\n slotValue = _getSlotValue(root.name, slotData, root.default)\n yield keepGoing(slotValue)\n elif isinstance(root, CDATA):\n write(b\"<![CDATA[\")\n write(escapedCDATA(root.data))\n write(b\"]]>\")\n elif isinstance(root, Comment):\n write(b\"<!--\")\n write(escapedComment(root.data))\n write(b\"-->\")\n elif isinstance(root, Tag):\n slotData.append(root.slotData)\n rendererName = root.render\n if rendererName is not None:\n if renderFactory is None:\n raise ValueError(\n f'Tag wants to be rendered by method \"{rendererName}\" '\n f\"but is not contained in any IRenderable\"\n )\n rootClone = root.clone(False)\n rootClone.render = None\n renderMethod = renderFactory.lookupRenderMethod(rendererName)\n result = renderMethod(request, rootClone)\n yield keepGoing(result)\n slotData.pop()\n return\n\n if not root.tagName:\n yield keepGoing(root.children)\n return\n\n write(b\"<\")\n if isinstance(root.tagName, str):\n tagName = root.tagName.encode(\"ascii\")\n else:\n tagName = root.tagName\n write(tagName)\n for k, v in root.attributes.items():\n if isinstance(k, str):\n k = k.encode(\"ascii\")\n write(b\" \" + k + b'=\"')\n # Serialize the contents of the attribute, wrapping the results of\n # that serialization so that _everything_ is quoted.\n yield keepGoing(\n v, attributeEscapingDoneOutside, write=writeWithAttributeEscaping(write)\n )\n write(b'\"')\n if root.children or nativeString(tagName) not in voidElements:\n write(b\">\")\n # Regardless of whether we're in an attribute or not, switch back\n # to the escapeForContent dataEscaper. 
The contents of a tag must\n # be quoted no matter what; in the top-level document, just so\n # they're valid, and if they're within an attribute, they have to\n # be quoted so that after applying the *un*-quoting required to re-\n # parse the tag within the attribute, all the quoting is still\n # correct.\n yield keepGoing(root.children, escapeForContent)\n write(b\"</\" + tagName + b\">\")\n else:\n write(b\" />\")\n\n elif isinstance(root, (tuple, list, GeneratorType)):\n for element in root:\n yield keepGoing(element)\n elif isinstance(root, CharRef):\n escaped = \"&#%d;\" % (root.ordinal,)\n write(escaped.encode(\"ascii\"))\n elif isinstance(root, Deferred):\n yield keepGoingAsync(_fork(root))\n elif iscoroutine(root):\n yield keepGoingAsync(\n Deferred.fromCoroutine(\n cast(Coroutine[Deferred[Flattenable], object, Flattenable], root)\n )\n )\n elif IRenderable.providedBy(root):\n result = root.render(request)\n yield keepGoing(result, renderFactory=root)\n else:\n raise UnsupportedType(root)\n\n\nasync def _flattenTree(\n request: Optional[IRequest], root: Flattenable, write: Callable[[bytes], object]\n) -> None:\n \"\"\"\n Make C{root} into an iterable of L{bytes} and L{Deferred} by doing a depth\n first traversal of the tree.\n\n @param request: A request object which will be passed to\n L{IRenderable.render}.\n\n @param root: An object to be made flatter. This may be of type C{unicode},\n L{bytes}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple},\n L{list}, L{types.GeneratorType}, L{Deferred}, or something providing\n L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @return: A C{Deferred}-returning coroutine that resolves to C{None}.\n \"\"\"\n buf = []\n bufSize = 0\n\n # Accumulate some bytes up to the buffer size so that we don't annoy the\n # upstream writer with a million tiny string.\n def bufferedWrite(bs: bytes) -> None:\n nonlocal bufSize\n buf.append(bs)\n bufSize += len(bs)\n if bufSize >= BUFFER_SIZE:\n flushBuffer()\n\n # Deliver the buffered content to the upstream writer as a single string.\n # This is how a \"big enough\" buffer gets delivered, how a buffer of any\n # size is delivered before execution is suspended to wait for an\n # asynchronous value, and how anything left in the buffer when we're\n # finished is delivered.\n def flushBuffer() -> None:\n nonlocal bufSize\n if bufSize > 0:\n write(b\"\".join(buf))\n del buf[:]\n bufSize = 0\n\n stack: List[Generator] = [\n _flattenElement(request, root, bufferedWrite, [], None, escapeForContent)\n ]\n\n while stack:\n try:\n frame = stack[-1].gi_frame\n element = next(stack[-1])\n if isinstance(element, Deferred):\n # Before suspending flattening for an unknown amount of time,\n # flush whatever data we have collected so far.\n flushBuffer()\n element = await element\n except StopIteration:\n stack.pop()\n except Exception as e:\n stack.pop()\n roots = []\n for generator in stack:\n roots.append(generator.gi_frame.f_locals[\"root\"])\n roots.append(frame.f_locals[\"root\"])\n raise FlattenerError(e, roots, extract_tb(exc_info()[2]))\n else:\n stack.append(element)\n\n # Flush any data that remains in the buffer before finishing.\n flushBuffer()\n\n\ndef flatten(\n request: Optional[IRequest], root: Flattenable, write: Callable[[bytes], object]\n) -> Deferred[None]:\n \"\"\"\n Incrementally write out a string representation of C{root} using C{write}.\n\n In order to create a string representation, C{root} will be decomposed into\n 
simpler objects which will themselves be decomposed and so on until strings\n or objects which can easily be converted to strings are encountered.\n\n @param request: A request object which will be passed to the C{render}\n method of any L{IRenderable} provider which is encountered.\n\n @param root: An object to be made flatter. This may be of type L{str},\n L{bytes}, L{slot}, L{Tag <twisted.web.template.Tag>}, L{tuple},\n L{list}, L{types.GeneratorType}, L{Deferred}, or something that\n provides L{IRenderable}.\n\n @param write: A callable which will be invoked with each L{bytes} produced\n by flattening C{root}.\n\n @return: A L{Deferred} which will be called back with C{None} when C{root}\n has been completely flattened into C{write} or which will be errbacked\n if an unexpected exception occurs.\n \"\"\"\n return ensureDeferred(_flattenTree(request, root, write))\n\n\ndef flattenString(request: Optional[IRequest], root: Flattenable) -> Deferred[bytes]:\n \"\"\"\n Collate a string representation of C{root} into a single string.\n\n This is basically gluing L{flatten} to an L{io.BytesIO} and returning\n the results. See L{flatten} for the exact meanings of C{request} and\n C{root}.\n\n @return: A L{Deferred} which will be called back with a single UTF-8 encoded\n string as its result when C{root} has been completely flattened or which\n will be errbacked if an unexpected exception occurs.\n \"\"\"\n io = BytesIO()\n d = flatten(request, root, io.write)\n d.addCallback(lambda _: io.getvalue())\n return cast(Deferred[bytes], d)\n", "path": "src/twisted/web/_flatten.py" } ]
diff --git a/src/twisted/newsfragments/11835.bugfix b/src/twisted/newsfragments/11835.bugfix
new file mode 100644
index 00000000000..1dc8f6d0af7
--- /dev/null
+++ b/src/twisted/newsfragments/11835.bugfix
@@ -0,0 +1 @@
+`twisted.web.template` now avoids some unecessary evaluation of type annotations and is faster.
diff --git a/src/twisted/web/_flatten.py b/src/twisted/web/_flatten.py
index 276c6455083..e5eba2b6cac 100644
--- a/src/twisted/web/_flatten.py
+++ b/src/twisted/web/_flatten.py
@@ -6,6 +6,7 @@
 Context-free flattener/serializer for rendering Python objects, possibly
 complex or arbitrarily nested, as strings.
 """
+from __future__ import annotations

 from inspect import iscoroutine
 from io import BytesIO
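The only change to `_flatten.py` in the hunk above is the added `from __future__ import annotations` (PEP 563), which keeps annotations as strings instead of evaluating them while each function is being defined; that is the reduced annotation evaluation the newsfragment refers to. A minimal sketch of the behaviour, separate from the PR (`SomeExpensiveAlias` is a made-up, undefined name):

```python
# PEP 563: annotations are stored as strings, so SomeExpensiveAlias is never
# looked up at definition time and no NameError is raised.
from __future__ import annotations


def render(root: SomeExpensiveAlias) -> None:
    pass


print(render.__annotations__)  # {'root': 'SomeExpensiveAlias', 'return': 'None'}
```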
liqd__a4-meinberlin-2082
dashboard: district "gesamtstädtisch" is "---"
In the dashboard the default district is "---" and should be changed to "Gesamtstädtisch".
[ { "content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.users import fields as user_fields\n\nfrom .models import ModeratorInvite\nfrom .models import ParticipantInvite\n\nUser = get_user_model()\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n\n\nclass ParticipantInviteForm(InviteForm):\n\n class Meta:\n model = ParticipantInvite\n fields = ['accept', 'reject']\n\n\nclass ModeratorInviteForm(InviteForm):\n\n class Meta:\n model = ModeratorInvite\n fields = ['accept', 'reject']\n\n\nclass InviteUsersFromEmailForm(forms.Form):\n add_users = user_fields.CommaSeparatedEmailField(\n required=False,\n label=_('Invite users via email')\n )\n\n add_users_upload = user_fields.EmailFileField(\n required=False,\n label=_('Invite users via file upload'),\n help_text=_('Upload a csv file containing email addresses.')\n )\n\n def __init__(self, *args, **kwargs):\n labels = kwargs.pop('labels', None)\n super().__init__(*args, **kwargs)\n\n if labels:\n self.fields['add_users'].label = labels[0]\n self.fields['add_users_upload'].label = labels[1]\n\n def clean(self):\n cleaned_data = super().clean()\n add_users = self.data.get('add_users')\n add_users_upload = self.files.get('add_users_upload')\n if not self.errors and not add_users and not add_users_upload:\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n\n\nclass TopicForm(ProjectDashboardForm):\n\n class Meta:\n model = Project\n fields = ['topics']\n required_for_project_publish = ['topics']\n\n\nclass PointForm(ProjectDashboardForm):\n\n class Meta:\n model = Project\n fields = ['administrative_district', 'point']\n required_for_project_publish = []\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n", "path": "meinberlin/apps/projects/forms.py" } ]
[ { "content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.users import fields as user_fields\n\nfrom .models import ModeratorInvite\nfrom .models import ParticipantInvite\n\nUser = get_user_model()\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n\n\nclass ParticipantInviteForm(InviteForm):\n\n class Meta:\n model = ParticipantInvite\n fields = ['accept', 'reject']\n\n\nclass ModeratorInviteForm(InviteForm):\n\n class Meta:\n model = ModeratorInvite\n fields = ['accept', 'reject']\n\n\nclass InviteUsersFromEmailForm(forms.Form):\n add_users = user_fields.CommaSeparatedEmailField(\n required=False,\n label=_('Invite users via email')\n )\n\n add_users_upload = user_fields.EmailFileField(\n required=False,\n label=_('Invite users via file upload'),\n help_text=_('Upload a csv file containing email addresses.')\n )\n\n def __init__(self, *args, **kwargs):\n labels = kwargs.pop('labels', None)\n super().__init__(*args, **kwargs)\n\n if labels:\n self.fields['add_users'].label = labels[0]\n self.fields['add_users_upload'].label = labels[1]\n\n def clean(self):\n cleaned_data = super().clean()\n add_users = self.data.get('add_users')\n add_users_upload = self.files.get('add_users_upload')\n if not self.errors and not add_users and not add_users_upload:\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n\n\nclass TopicForm(ProjectDashboardForm):\n\n class Meta:\n model = Project\n fields = ['topics']\n required_for_project_publish = ['topics']\n\n\nclass PointForm(ProjectDashboardForm):\n\n class Meta:\n model = Project\n fields = ['administrative_district', 'point']\n required_for_project_publish = []\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['administrative_district'].empty_label = _('City wide')\n", "path": "meinberlin/apps/projects/forms.py" } ]
diff --git a/meinberlin/apps/projects/forms.py b/meinberlin/apps/projects/forms.py
index 969995993c..f45b56634c 100644
--- a/meinberlin/apps/projects/forms.py
+++ b/meinberlin/apps/projects/forms.py
@@ -92,3 +92,7 @@ class Meta:
             'point': maps_widgets.MapChoosePointWidget(
                 polygon=settings.BERLIN_POLYGON)
         }
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.fields['administrative_district'].empty_label = _('City wide')
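The four added lines above work because a `ForeignKey` such as `administrative_district` is rendered on a `ModelForm` as a `ModelChoiceField`, whose blank first choice is labelled with dashes by default; that is the `---` the issue complains about. Overriding `empty_label` in `__init__` replaces that label. A hedged sketch of the same mechanism (the `Place` model and `district` field are made-up names, not meinberlin code, and this only runs inside a configured Django project):

```python
from django import forms
from django.utils.translation import ugettext_lazy as _

from myapp.models import Place  # hypothetical model with a `district` ForeignKey


class PlaceForm(forms.ModelForm):
    class Meta:
        model = Place
        fields = ['district']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the default dashed blank-choice label with readable text.
        self.fields['district'].empty_label = _('City wide')
```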
tiangolo__fastapi-681
Async class method dependency raises a ValueError

**Describe the bug**
If you use an async class method as a dependency, a `ValueError` is thrown. It doesn't happen for a non-async method.

Complete error: `ValueError: [KeyError(<class 'coroutine'>), TypeError("'coroutine' object is not iterable"), TypeError('vars() argument must have __dict__ attribute')]` (at `fastapi/encoders.py:106`)

**To Reproduce**
```py
from fastapi import Depends, FastAPI
from starlette.requests import Request


class DependencyClass:
    async def async_dep(self, request: Request):
        return True

    def sync_dep(self, request: Request):
        return True


app = FastAPI()
dependency = DependencyClass()


# Error
@app.get('/async-dep')
def async_dep(r=Depends(dependency.async_dep)):
    return r


# Everything is fine
@app.get('/sync-dep')
def sync_dep(r=Depends(dependency.sync_dep)):
    return r
```

**Expected behavior**
The async class method dependency should be called and its return value injected.

**Environment:**
- OS: macOS
- FastAPI Version: 0.42.0
- Python version: 3.7.2

**Additional context**
I believe the issue comes from here: https://github.com/tiangolo/fastapi/blob/65536cbf63318d111bf608960378d651b6c1596a/fastapi/dependencies/utils.py#L353-L359

Indeed, `inspect.isfunction(call)` will return `False` in the case of a class method. Hence, it is [sent to `run_in_threadpool`](https://github.com/tiangolo/fastapi/blob/65536cbf63318d111bf608960378d651b6c1596a/fastapi/dependencies/utils.py#L453-L456), which never awaits the coroutine, and we end up trying to serialize it instead of its result (hence the `ValueError`).

Changing the check to:

```py
if inspect.isfunction(call) or inspect.ismethod(call):
```

solves the issue.

I can make a PR with the fix and unit tests if it helps.
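The root-cause analysis above is easy to check in isolation. A minimal sketch, independent of FastAPI, showing that a bound method fails the `inspect.isfunction` test even though it is still a coroutine function (which is why the coroutine ends up in `run_in_threadpool` unawaited):

```python
import asyncio
import inspect


class DependencyClass:
    async def async_dep(self):
        return True


bound = DependencyClass().async_dep

print(inspect.isfunction(bound))           # False: it is a bound method, not a plain function
print(inspect.ismethod(bound))             # True
print(asyncio.iscoroutinefunction(bound))  # True: calling it returns a coroutine
```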
[ { "content": "import asyncio\nimport inspect\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom fastapi import params\nfrom fastapi.concurrency import (\n AsyncExitStack,\n _fake_asynccontextmanager,\n asynccontextmanager,\n contextmanager_in_threadpool,\n)\nfrom fastapi.dependencies.models import Dependant, SecurityRequirement\nfrom fastapi.security.base import SecurityBase\nfrom fastapi.security.oauth2 import OAuth2, SecurityScopes\nfrom fastapi.security.open_id_connect_url import OpenIdConnect\nfrom fastapi.utils import get_path_param_names\nfrom pydantic import BaseConfig, BaseModel, Schema, create_model\nfrom pydantic.error_wrappers import ErrorWrapper\nfrom pydantic.errors import MissingError\nfrom pydantic.fields import Field, Required, Shape\nfrom pydantic.schema import get_annotation_from_schema\nfrom pydantic.utils import ForwardRef, evaluate_forwardref, lenient_issubclass\nfrom starlette.background import BackgroundTasks\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.datastructures import FormData, Headers, QueryParams, UploadFile\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nsequence_shapes = {\n Shape.LIST,\n Shape.SET,\n Shape.TUPLE,\n Shape.SEQUENCE,\n Shape.TUPLE_ELLIPS,\n}\nsequence_types = (list, set, tuple)\nsequence_shape_to_type = {\n Shape.LIST: list,\n Shape.SET: set,\n Shape.TUPLE: tuple,\n Shape.SEQUENCE: list,\n Shape.TUPLE_ELLIPS: list,\n}\n\n\ndef get_param_sub_dependant(\n *, param: inspect.Parameter, path: str, security_scopes: List[str] = None\n) -> Dependant:\n depends: params.Depends = param.default\n if depends.dependency:\n dependency = depends.dependency\n else:\n dependency = param.annotation\n return get_sub_dependant(\n depends=depends,\n dependency=dependency,\n path=path,\n name=param.name,\n security_scopes=security_scopes,\n )\n\n\ndef get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:\n assert callable(\n depends.dependency\n ), \"A parameter-less dependency must have a callable dependency\"\n return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)\n\n\ndef get_sub_dependant(\n *,\n depends: params.Depends,\n dependency: Callable,\n path: str,\n name: str = None,\n security_scopes: List[str] = None,\n) -> Dependant:\n security_requirement = None\n security_scopes = security_scopes or []\n if isinstance(depends, params.Security):\n dependency_scopes = depends.scopes\n security_scopes.extend(dependency_scopes)\n if isinstance(dependency, SecurityBase):\n use_scopes: List[str] = []\n if isinstance(dependency, (OAuth2, OpenIdConnect)):\n use_scopes = security_scopes\n security_requirement = SecurityRequirement(\n security_scheme=dependency, scopes=use_scopes\n )\n sub_dependant = get_dependant(\n path=path,\n call=dependency,\n name=name,\n security_scopes=security_scopes,\n use_cache=depends.use_cache,\n )\n if security_requirement:\n sub_dependant.security_requirements.append(security_requirement)\n sub_dependant.security_scopes = security_scopes\n return sub_dependant\n\n\nCacheKey = Tuple[Optional[Callable], Tuple[str, ...]]\n\n\ndef get_flat_dependant(\n dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None\n) -> Dependant:\n if visited is None:\n visited = []\n visited.append(dependant.cache_key)\n\n 
flat_dependant = Dependant(\n path_params=dependant.path_params.copy(),\n query_params=dependant.query_params.copy(),\n header_params=dependant.header_params.copy(),\n cookie_params=dependant.cookie_params.copy(),\n body_params=dependant.body_params.copy(),\n security_schemes=dependant.security_requirements.copy(),\n use_cache=dependant.use_cache,\n path=dependant.path,\n )\n for sub_dependant in dependant.dependencies:\n if skip_repeats and sub_dependant.cache_key in visited:\n continue\n flat_sub = get_flat_dependant(\n sub_dependant, skip_repeats=skip_repeats, visited=visited\n )\n flat_dependant.path_params.extend(flat_sub.path_params)\n flat_dependant.query_params.extend(flat_sub.query_params)\n flat_dependant.header_params.extend(flat_sub.header_params)\n flat_dependant.cookie_params.extend(flat_sub.cookie_params)\n flat_dependant.body_params.extend(flat_sub.body_params)\n flat_dependant.security_requirements.extend(flat_sub.security_requirements)\n return flat_dependant\n\n\ndef is_scalar_field(field: Field) -> bool:\n if not (\n field.shape == Shape.SINGLETON\n and not lenient_issubclass(field.type_, BaseModel)\n and not lenient_issubclass(field.type_, sequence_types + (dict,))\n and not isinstance(field.schema, params.Body)\n ):\n return False\n if field.sub_fields:\n if not all(is_scalar_field(f) for f in field.sub_fields):\n return False\n return True\n\n\ndef is_scalar_sequence_field(field: Field) -> bool:\n if (field.shape in sequence_shapes) and not lenient_issubclass(\n field.type_, BaseModel\n ):\n if field.sub_fields is not None:\n for sub_field in field.sub_fields:\n if not is_scalar_field(sub_field):\n return False\n return True\n if lenient_issubclass(field.type_, sequence_types):\n return True\n return False\n\n\ndef get_typed_signature(call: Callable) -> inspect.Signature:\n signature = inspect.signature(call)\n globalns = getattr(call, \"__globals__\", {})\n typed_params = [\n inspect.Parameter(\n name=param.name,\n kind=param.kind,\n default=param.default,\n annotation=get_typed_annotation(param, globalns),\n )\n for param in signature.parameters.values()\n ]\n typed_signature = inspect.Signature(typed_params)\n return typed_signature\n\n\ndef get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:\n annotation = param.annotation\n if isinstance(annotation, str):\n annotation = ForwardRef(annotation)\n annotation = evaluate_forwardref(annotation, globalns, globalns)\n return annotation\n\n\nasync_contextmanager_dependencies_error = \"\"\"\nFastAPI dependencies with yield require Python 3.7 or above,\nor the backports for Python 3.6, installed with:\n pip install async-exit-stack async-generator\n\"\"\"\n\n\ndef check_dependency_contextmanagers() -> None:\n if AsyncExitStack is None or asynccontextmanager == _fake_asynccontextmanager:\n raise RuntimeError(async_contextmanager_dependencies_error) # pragma: no cover\n\n\ndef get_dependant(\n *,\n path: str,\n call: Callable,\n name: str = None,\n security_scopes: List[str] = None,\n use_cache: bool = True,\n) -> Dependant:\n path_param_names = get_path_param_names(path)\n endpoint_signature = get_typed_signature(call)\n signature_params = endpoint_signature.parameters\n if inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):\n check_dependency_contextmanagers()\n dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)\n for param_name, param in signature_params.items():\n if isinstance(param.default, params.Depends):\n sub_dependant = 
get_param_sub_dependant(\n param=param, path=path, security_scopes=security_scopes\n )\n dependant.dependencies.append(sub_dependant)\n for param_name, param in signature_params.items():\n if isinstance(param.default, params.Depends):\n continue\n if add_non_field_param_to_dependency(param=param, dependant=dependant):\n continue\n param_field = get_param_field(param=param, default_schema=params.Query)\n if param_name in path_param_names:\n assert is_scalar_field(\n field=param_field\n ), f\"Path params must be of one of the supported types\"\n if isinstance(param.default, params.Path):\n ignore_default = False\n else:\n ignore_default = True\n param_field = get_param_field(\n param=param,\n default_schema=params.Path,\n force_type=params.ParamTypes.path,\n ignore_default=ignore_default,\n )\n add_param_to_fields(field=param_field, dependant=dependant)\n elif is_scalar_field(field=param_field):\n add_param_to_fields(field=param_field, dependant=dependant)\n elif isinstance(\n param.default, (params.Query, params.Header)\n ) and is_scalar_sequence_field(param_field):\n add_param_to_fields(field=param_field, dependant=dependant)\n else:\n assert isinstance(\n param_field.schema, params.Body\n ), f\"Param: {param_field.name} can only be a request body, using Body(...)\"\n dependant.body_params.append(param_field)\n return dependant\n\n\ndef add_non_field_param_to_dependency(\n *, param: inspect.Parameter, dependant: Dependant\n) -> Optional[bool]:\n if lenient_issubclass(param.annotation, Request):\n dependant.request_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, WebSocket):\n dependant.websocket_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, Response):\n dependant.response_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, BackgroundTasks):\n dependant.background_tasks_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, SecurityScopes):\n dependant.security_scopes_param_name = param.name\n return True\n return None\n\n\ndef get_param_field(\n *,\n param: inspect.Parameter,\n default_schema: Type[params.Param] = params.Param,\n force_type: params.ParamTypes = None,\n ignore_default: bool = False,\n) -> Field:\n default_value = Required\n had_schema = False\n if not param.default == param.empty and ignore_default is False:\n default_value = param.default\n if isinstance(default_value, Schema):\n had_schema = True\n schema = default_value\n default_value = schema.default\n if isinstance(schema, params.Param) and getattr(schema, \"in_\", None) is None:\n schema.in_ = default_schema.in_\n if force_type:\n schema.in_ = force_type # type: ignore\n else:\n schema = default_schema(default_value)\n required = default_value == Required\n annotation: Any = Any\n if not param.annotation == param.empty:\n annotation = param.annotation\n annotation = get_annotation_from_schema(annotation, schema)\n if not schema.alias and getattr(schema, \"convert_underscores\", None):\n alias = param.name.replace(\"_\", \"-\")\n else:\n alias = schema.alias or param.name\n field = Field(\n name=param.name,\n type_=annotation,\n default=None if required else default_value,\n alias=alias,\n required=required,\n model_config=BaseConfig,\n class_validators={},\n schema=schema,\n )\n if not had_schema and not is_scalar_field(field=field):\n field.schema = params.Body(schema.default)\n return field\n\n\ndef add_param_to_fields(*, field: Field, dependant: Dependant) -> None:\n field.schema = 
cast(params.Param, field.schema)\n if field.schema.in_ == params.ParamTypes.path:\n dependant.path_params.append(field)\n elif field.schema.in_ == params.ParamTypes.query:\n dependant.query_params.append(field)\n elif field.schema.in_ == params.ParamTypes.header:\n dependant.header_params.append(field)\n else:\n assert (\n field.schema.in_ == params.ParamTypes.cookie\n ), f\"non-body parameters must be in path, query, header or cookie: {field.name}\"\n dependant.cookie_params.append(field)\n\n\ndef is_coroutine_callable(call: Callable) -> bool:\n if inspect.isfunction(call):\n return asyncio.iscoroutinefunction(call)\n if inspect.isclass(call):\n return False\n call = getattr(call, \"__call__\", None)\n return asyncio.iscoroutinefunction(call)\n\n\nasync def solve_generator(\n *, call: Callable, stack: AsyncExitStack, sub_values: Dict[str, Any]\n) -> Any:\n if inspect.isgeneratorfunction(call):\n cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))\n elif inspect.isasyncgenfunction(call):\n cm = asynccontextmanager(call)(**sub_values)\n return await stack.enter_async_context(cm)\n\n\nasync def solve_dependencies(\n *,\n request: Union[Request, WebSocket],\n dependant: Dependant,\n body: Optional[Union[Dict[str, Any], FormData]] = None,\n background_tasks: BackgroundTasks = None,\n response: Response = None,\n dependency_overrides_provider: Any = None,\n dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,\n) -> Tuple[\n Dict[str, Any],\n List[ErrorWrapper],\n Optional[BackgroundTasks],\n Response,\n Dict[Tuple[Callable, Tuple[str]], Any],\n]:\n values: Dict[str, Any] = {}\n errors: List[ErrorWrapper] = []\n response = response or Response(\n content=None,\n status_code=None, # type: ignore\n headers=None,\n media_type=None,\n background=None,\n )\n dependency_cache = dependency_cache or {}\n sub_dependant: Dependant\n for sub_dependant in dependant.dependencies:\n sub_dependant.call = cast(Callable, sub_dependant.call)\n sub_dependant.cache_key = cast(\n Tuple[Callable, Tuple[str]], sub_dependant.cache_key\n )\n call = sub_dependant.call\n use_sub_dependant = sub_dependant\n if (\n dependency_overrides_provider\n and dependency_overrides_provider.dependency_overrides\n ):\n original_call = sub_dependant.call\n call = getattr(\n dependency_overrides_provider, \"dependency_overrides\", {}\n ).get(original_call, original_call)\n use_path: str = sub_dependant.path # type: ignore\n use_sub_dependant = get_dependant(\n path=use_path,\n call=call,\n name=sub_dependant.name,\n security_scopes=sub_dependant.security_scopes,\n )\n\n solved_result = await solve_dependencies(\n request=request,\n dependant=use_sub_dependant,\n body=body,\n background_tasks=background_tasks,\n response=response,\n dependency_overrides_provider=dependency_overrides_provider,\n dependency_cache=dependency_cache,\n )\n (\n sub_values,\n sub_errors,\n background_tasks,\n sub_response,\n sub_dependency_cache,\n ) = solved_result\n sub_response = cast(Response, sub_response)\n response.headers.raw.extend(sub_response.headers.raw)\n if sub_response.status_code:\n response.status_code = sub_response.status_code\n dependency_cache.update(sub_dependency_cache)\n if sub_errors:\n errors.extend(sub_errors)\n continue\n if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:\n solved = dependency_cache[sub_dependant.cache_key]\n elif inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):\n stack = request.scope.get(\"fastapi_astack\")\n if stack is None:\n raise 
RuntimeError(\n async_contextmanager_dependencies_error\n ) # pragma: no cover\n solved = await solve_generator(\n call=call, stack=stack, sub_values=sub_values\n )\n elif is_coroutine_callable(call):\n solved = await call(**sub_values)\n else:\n solved = await run_in_threadpool(call, **sub_values)\n if sub_dependant.name is not None:\n values[sub_dependant.name] = solved\n if sub_dependant.cache_key not in dependency_cache:\n dependency_cache[sub_dependant.cache_key] = solved\n path_values, path_errors = request_params_to_args(\n dependant.path_params, request.path_params\n )\n query_values, query_errors = request_params_to_args(\n dependant.query_params, request.query_params\n )\n header_values, header_errors = request_params_to_args(\n dependant.header_params, request.headers\n )\n cookie_values, cookie_errors = request_params_to_args(\n dependant.cookie_params, request.cookies\n )\n values.update(path_values)\n values.update(query_values)\n values.update(header_values)\n values.update(cookie_values)\n errors += path_errors + query_errors + header_errors + cookie_errors\n if dependant.body_params:\n (\n body_values,\n body_errors,\n ) = await request_body_to_args( # body_params checked above\n required_params=dependant.body_params, received_body=body\n )\n values.update(body_values)\n errors.extend(body_errors)\n if dependant.request_param_name and isinstance(request, Request):\n values[dependant.request_param_name] = request\n elif dependant.websocket_param_name and isinstance(request, WebSocket):\n values[dependant.websocket_param_name] = request\n if dependant.background_tasks_param_name:\n if background_tasks is None:\n background_tasks = BackgroundTasks()\n values[dependant.background_tasks_param_name] = background_tasks\n if dependant.response_param_name:\n values[dependant.response_param_name] = response\n if dependant.security_scopes_param_name:\n values[dependant.security_scopes_param_name] = SecurityScopes(\n scopes=dependant.security_scopes\n )\n return values, errors, background_tasks, response, dependency_cache\n\n\ndef request_params_to_args(\n required_params: Sequence[Field],\n received_params: Union[Mapping[str, Any], QueryParams, Headers],\n) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:\n values = {}\n errors = []\n for field in required_params:\n if is_scalar_sequence_field(field) and isinstance(\n received_params, (QueryParams, Headers)\n ):\n value = received_params.getlist(field.alias) or field.default\n else:\n value = received_params.get(field.alias)\n schema = field.schema\n assert isinstance(schema, params.Param), \"Params must be subclasses of Param\"\n if value is None:\n if field.required:\n errors.append(\n ErrorWrapper(\n MissingError(),\n loc=(schema.in_.value, field.alias),\n config=BaseConfig,\n )\n )\n else:\n values[field.name] = deepcopy(field.default)\n continue\n v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[field.name] = v_\n return values, errors\n\n\nasync def request_body_to_args(\n required_params: List[Field],\n received_body: Optional[Union[Dict[str, Any], FormData]],\n) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:\n values = {}\n errors = []\n if required_params:\n field = required_params[0]\n embed = getattr(field.schema, \"embed\", None)\n if len(required_params) == 1 and not embed:\n received_body = {field.alias: received_body}\n for field in required_params:\n 
value: Any = None\n if received_body is not None:\n if field.shape in sequence_shapes and isinstance(\n received_body, FormData\n ):\n value = received_body.getlist(field.alias)\n else:\n value = received_body.get(field.alias)\n if (\n value is None\n or (isinstance(field.schema, params.Form) and value == \"\")\n or (\n isinstance(field.schema, params.Form)\n and field.shape in sequence_shapes\n and len(value) == 0\n )\n ):\n if field.required:\n errors.append(\n ErrorWrapper(\n MissingError(), loc=(\"body\", field.alias), config=BaseConfig\n )\n )\n else:\n values[field.name] = deepcopy(field.default)\n continue\n if (\n isinstance(field.schema, params.File)\n and lenient_issubclass(field.type_, bytes)\n and isinstance(value, UploadFile)\n ):\n value = await value.read()\n elif (\n field.shape in sequence_shapes\n and isinstance(field.schema, params.File)\n and lenient_issubclass(field.type_, bytes)\n and isinstance(value, sequence_types)\n ):\n awaitables = [sub_value.read() for sub_value in value]\n contents = await asyncio.gather(*awaitables)\n value = sequence_shape_to_type[field.shape](contents)\n v_, errors_ = field.validate(value, values, loc=(\"body\", field.alias))\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[field.name] = v_\n return values, errors\n\n\ndef get_schema_compatible_field(*, field: Field) -> Field:\n out_field = field\n if lenient_issubclass(field.type_, UploadFile):\n use_type: type = bytes\n if field.shape in sequence_shapes:\n use_type = List[bytes]\n out_field = Field(\n name=field.name,\n type_=use_type,\n class_validators=field.class_validators,\n model_config=field.model_config,\n default=field.default,\n required=field.required,\n alias=field.alias,\n schema=field.schema,\n )\n return out_field\n\n\ndef get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:\n flat_dependant = get_flat_dependant(dependant)\n if not flat_dependant.body_params:\n return None\n first_param = flat_dependant.body_params[0]\n embed = getattr(first_param.schema, \"embed\", None)\n if len(flat_dependant.body_params) == 1 and not embed:\n return get_schema_compatible_field(field=first_param)\n model_name = \"Body_\" + name\n BodyModel = create_model(model_name)\n for f in flat_dependant.body_params:\n BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)\n required = any(True for f in flat_dependant.body_params if f.required)\n\n BodySchema_kwargs: Dict[str, Any] = dict(default=None)\n if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):\n BodySchema: Type[params.Body] = params.File\n elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):\n BodySchema = params.Form\n else:\n BodySchema = params.Body\n\n body_param_media_types = [\n getattr(f.schema, \"media_type\")\n for f in flat_dependant.body_params\n if isinstance(f.schema, params.Body)\n ]\n if len(set(body_param_media_types)) == 1:\n BodySchema_kwargs[\"media_type\"] = body_param_media_types[0]\n\n field = Field(\n name=\"body\",\n type_=BodyModel,\n default=None,\n required=required,\n model_config=BaseConfig,\n class_validators={},\n alias=\"body\",\n schema=BodySchema(**BodySchema_kwargs),\n )\n return field\n", "path": "fastapi/dependencies/utils.py" } ]
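For reference, the check the report points at corresponds to `is_coroutine_callable` in the listing above. Below is a hedged sketch of the reporter's proposed change; it mirrors the suggestion in the issue text and is not necessarily the exact patch that was merged:

```python
import asyncio
import inspect
from typing import Callable


def is_coroutine_callable(call: Callable) -> bool:
    # Treat bound methods like plain functions, per the suggestion in the
    # issue above, so async methods are recognised as coroutine functions.
    if inspect.isfunction(call) or inspect.ismethod(call):
        return asyncio.iscoroutinefunction(call)
    if inspect.isclass(call):
        return False
    call = getattr(call, "__call__", None)
    return asyncio.iscoroutinefunction(call)
```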
[ { "content": "import asyncio\nimport inspect\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom fastapi import params\nfrom fastapi.concurrency import (\n AsyncExitStack,\n _fake_asynccontextmanager,\n asynccontextmanager,\n contextmanager_in_threadpool,\n)\nfrom fastapi.dependencies.models import Dependant, SecurityRequirement\nfrom fastapi.security.base import SecurityBase\nfrom fastapi.security.oauth2 import OAuth2, SecurityScopes\nfrom fastapi.security.open_id_connect_url import OpenIdConnect\nfrom fastapi.utils import get_path_param_names\nfrom pydantic import BaseConfig, BaseModel, Schema, create_model\nfrom pydantic.error_wrappers import ErrorWrapper\nfrom pydantic.errors import MissingError\nfrom pydantic.fields import Field, Required, Shape\nfrom pydantic.schema import get_annotation_from_schema\nfrom pydantic.utils import ForwardRef, evaluate_forwardref, lenient_issubclass\nfrom starlette.background import BackgroundTasks\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.datastructures import FormData, Headers, QueryParams, UploadFile\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nsequence_shapes = {\n Shape.LIST,\n Shape.SET,\n Shape.TUPLE,\n Shape.SEQUENCE,\n Shape.TUPLE_ELLIPS,\n}\nsequence_types = (list, set, tuple)\nsequence_shape_to_type = {\n Shape.LIST: list,\n Shape.SET: set,\n Shape.TUPLE: tuple,\n Shape.SEQUENCE: list,\n Shape.TUPLE_ELLIPS: list,\n}\n\n\ndef get_param_sub_dependant(\n *, param: inspect.Parameter, path: str, security_scopes: List[str] = None\n) -> Dependant:\n depends: params.Depends = param.default\n if depends.dependency:\n dependency = depends.dependency\n else:\n dependency = param.annotation\n return get_sub_dependant(\n depends=depends,\n dependency=dependency,\n path=path,\n name=param.name,\n security_scopes=security_scopes,\n )\n\n\ndef get_parameterless_sub_dependant(*, depends: params.Depends, path: str) -> Dependant:\n assert callable(\n depends.dependency\n ), \"A parameter-less dependency must have a callable dependency\"\n return get_sub_dependant(depends=depends, dependency=depends.dependency, path=path)\n\n\ndef get_sub_dependant(\n *,\n depends: params.Depends,\n dependency: Callable,\n path: str,\n name: str = None,\n security_scopes: List[str] = None,\n) -> Dependant:\n security_requirement = None\n security_scopes = security_scopes or []\n if isinstance(depends, params.Security):\n dependency_scopes = depends.scopes\n security_scopes.extend(dependency_scopes)\n if isinstance(dependency, SecurityBase):\n use_scopes: List[str] = []\n if isinstance(dependency, (OAuth2, OpenIdConnect)):\n use_scopes = security_scopes\n security_requirement = SecurityRequirement(\n security_scheme=dependency, scopes=use_scopes\n )\n sub_dependant = get_dependant(\n path=path,\n call=dependency,\n name=name,\n security_scopes=security_scopes,\n use_cache=depends.use_cache,\n )\n if security_requirement:\n sub_dependant.security_requirements.append(security_requirement)\n sub_dependant.security_scopes = security_scopes\n return sub_dependant\n\n\nCacheKey = Tuple[Optional[Callable], Tuple[str, ...]]\n\n\ndef get_flat_dependant(\n dependant: Dependant, *, skip_repeats: bool = False, visited: List[CacheKey] = None\n) -> Dependant:\n if visited is None:\n visited = []\n visited.append(dependant.cache_key)\n\n 
flat_dependant = Dependant(\n path_params=dependant.path_params.copy(),\n query_params=dependant.query_params.copy(),\n header_params=dependant.header_params.copy(),\n cookie_params=dependant.cookie_params.copy(),\n body_params=dependant.body_params.copy(),\n security_schemes=dependant.security_requirements.copy(),\n use_cache=dependant.use_cache,\n path=dependant.path,\n )\n for sub_dependant in dependant.dependencies:\n if skip_repeats and sub_dependant.cache_key in visited:\n continue\n flat_sub = get_flat_dependant(\n sub_dependant, skip_repeats=skip_repeats, visited=visited\n )\n flat_dependant.path_params.extend(flat_sub.path_params)\n flat_dependant.query_params.extend(flat_sub.query_params)\n flat_dependant.header_params.extend(flat_sub.header_params)\n flat_dependant.cookie_params.extend(flat_sub.cookie_params)\n flat_dependant.body_params.extend(flat_sub.body_params)\n flat_dependant.security_requirements.extend(flat_sub.security_requirements)\n return flat_dependant\n\n\ndef is_scalar_field(field: Field) -> bool:\n if not (\n field.shape == Shape.SINGLETON\n and not lenient_issubclass(field.type_, BaseModel)\n and not lenient_issubclass(field.type_, sequence_types + (dict,))\n and not isinstance(field.schema, params.Body)\n ):\n return False\n if field.sub_fields:\n if not all(is_scalar_field(f) for f in field.sub_fields):\n return False\n return True\n\n\ndef is_scalar_sequence_field(field: Field) -> bool:\n if (field.shape in sequence_shapes) and not lenient_issubclass(\n field.type_, BaseModel\n ):\n if field.sub_fields is not None:\n for sub_field in field.sub_fields:\n if not is_scalar_field(sub_field):\n return False\n return True\n if lenient_issubclass(field.type_, sequence_types):\n return True\n return False\n\n\ndef get_typed_signature(call: Callable) -> inspect.Signature:\n signature = inspect.signature(call)\n globalns = getattr(call, \"__globals__\", {})\n typed_params = [\n inspect.Parameter(\n name=param.name,\n kind=param.kind,\n default=param.default,\n annotation=get_typed_annotation(param, globalns),\n )\n for param in signature.parameters.values()\n ]\n typed_signature = inspect.Signature(typed_params)\n return typed_signature\n\n\ndef get_typed_annotation(param: inspect.Parameter, globalns: Dict[str, Any]) -> Any:\n annotation = param.annotation\n if isinstance(annotation, str):\n annotation = ForwardRef(annotation)\n annotation = evaluate_forwardref(annotation, globalns, globalns)\n return annotation\n\n\nasync_contextmanager_dependencies_error = \"\"\"\nFastAPI dependencies with yield require Python 3.7 or above,\nor the backports for Python 3.6, installed with:\n pip install async-exit-stack async-generator\n\"\"\"\n\n\ndef check_dependency_contextmanagers() -> None:\n if AsyncExitStack is None or asynccontextmanager == _fake_asynccontextmanager:\n raise RuntimeError(async_contextmanager_dependencies_error) # pragma: no cover\n\n\ndef get_dependant(\n *,\n path: str,\n call: Callable,\n name: str = None,\n security_scopes: List[str] = None,\n use_cache: bool = True,\n) -> Dependant:\n path_param_names = get_path_param_names(path)\n endpoint_signature = get_typed_signature(call)\n signature_params = endpoint_signature.parameters\n if inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):\n check_dependency_contextmanagers()\n dependant = Dependant(call=call, name=name, path=path, use_cache=use_cache)\n for param_name, param in signature_params.items():\n if isinstance(param.default, params.Depends):\n sub_dependant = 
get_param_sub_dependant(\n param=param, path=path, security_scopes=security_scopes\n )\n dependant.dependencies.append(sub_dependant)\n for param_name, param in signature_params.items():\n if isinstance(param.default, params.Depends):\n continue\n if add_non_field_param_to_dependency(param=param, dependant=dependant):\n continue\n param_field = get_param_field(param=param, default_schema=params.Query)\n if param_name in path_param_names:\n assert is_scalar_field(\n field=param_field\n ), f\"Path params must be of one of the supported types\"\n if isinstance(param.default, params.Path):\n ignore_default = False\n else:\n ignore_default = True\n param_field = get_param_field(\n param=param,\n default_schema=params.Path,\n force_type=params.ParamTypes.path,\n ignore_default=ignore_default,\n )\n add_param_to_fields(field=param_field, dependant=dependant)\n elif is_scalar_field(field=param_field):\n add_param_to_fields(field=param_field, dependant=dependant)\n elif isinstance(\n param.default, (params.Query, params.Header)\n ) and is_scalar_sequence_field(param_field):\n add_param_to_fields(field=param_field, dependant=dependant)\n else:\n assert isinstance(\n param_field.schema, params.Body\n ), f\"Param: {param_field.name} can only be a request body, using Body(...)\"\n dependant.body_params.append(param_field)\n return dependant\n\n\ndef add_non_field_param_to_dependency(\n *, param: inspect.Parameter, dependant: Dependant\n) -> Optional[bool]:\n if lenient_issubclass(param.annotation, Request):\n dependant.request_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, WebSocket):\n dependant.websocket_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, Response):\n dependant.response_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, BackgroundTasks):\n dependant.background_tasks_param_name = param.name\n return True\n elif lenient_issubclass(param.annotation, SecurityScopes):\n dependant.security_scopes_param_name = param.name\n return True\n return None\n\n\ndef get_param_field(\n *,\n param: inspect.Parameter,\n default_schema: Type[params.Param] = params.Param,\n force_type: params.ParamTypes = None,\n ignore_default: bool = False,\n) -> Field:\n default_value = Required\n had_schema = False\n if not param.default == param.empty and ignore_default is False:\n default_value = param.default\n if isinstance(default_value, Schema):\n had_schema = True\n schema = default_value\n default_value = schema.default\n if isinstance(schema, params.Param) and getattr(schema, \"in_\", None) is None:\n schema.in_ = default_schema.in_\n if force_type:\n schema.in_ = force_type # type: ignore\n else:\n schema = default_schema(default_value)\n required = default_value == Required\n annotation: Any = Any\n if not param.annotation == param.empty:\n annotation = param.annotation\n annotation = get_annotation_from_schema(annotation, schema)\n if not schema.alias and getattr(schema, \"convert_underscores\", None):\n alias = param.name.replace(\"_\", \"-\")\n else:\n alias = schema.alias or param.name\n field = Field(\n name=param.name,\n type_=annotation,\n default=None if required else default_value,\n alias=alias,\n required=required,\n model_config=BaseConfig,\n class_validators={},\n schema=schema,\n )\n if not had_schema and not is_scalar_field(field=field):\n field.schema = params.Body(schema.default)\n return field\n\n\ndef add_param_to_fields(*, field: Field, dependant: Dependant) -> None:\n field.schema = 
cast(params.Param, field.schema)\n if field.schema.in_ == params.ParamTypes.path:\n dependant.path_params.append(field)\n elif field.schema.in_ == params.ParamTypes.query:\n dependant.query_params.append(field)\n elif field.schema.in_ == params.ParamTypes.header:\n dependant.header_params.append(field)\n else:\n assert (\n field.schema.in_ == params.ParamTypes.cookie\n ), f\"non-body parameters must be in path, query, header or cookie: {field.name}\"\n dependant.cookie_params.append(field)\n\n\ndef is_coroutine_callable(call: Callable) -> bool:\n if inspect.isroutine(call):\n return asyncio.iscoroutinefunction(call)\n if inspect.isclass(call):\n return False\n call = getattr(call, \"__call__\", None)\n return asyncio.iscoroutinefunction(call)\n\n\nasync def solve_generator(\n *, call: Callable, stack: AsyncExitStack, sub_values: Dict[str, Any]\n) -> Any:\n if inspect.isgeneratorfunction(call):\n cm = contextmanager_in_threadpool(contextmanager(call)(**sub_values))\n elif inspect.isasyncgenfunction(call):\n cm = asynccontextmanager(call)(**sub_values)\n return await stack.enter_async_context(cm)\n\n\nasync def solve_dependencies(\n *,\n request: Union[Request, WebSocket],\n dependant: Dependant,\n body: Optional[Union[Dict[str, Any], FormData]] = None,\n background_tasks: BackgroundTasks = None,\n response: Response = None,\n dependency_overrides_provider: Any = None,\n dependency_cache: Dict[Tuple[Callable, Tuple[str]], Any] = None,\n) -> Tuple[\n Dict[str, Any],\n List[ErrorWrapper],\n Optional[BackgroundTasks],\n Response,\n Dict[Tuple[Callable, Tuple[str]], Any],\n]:\n values: Dict[str, Any] = {}\n errors: List[ErrorWrapper] = []\n response = response or Response(\n content=None,\n status_code=None, # type: ignore\n headers=None,\n media_type=None,\n background=None,\n )\n dependency_cache = dependency_cache or {}\n sub_dependant: Dependant\n for sub_dependant in dependant.dependencies:\n sub_dependant.call = cast(Callable, sub_dependant.call)\n sub_dependant.cache_key = cast(\n Tuple[Callable, Tuple[str]], sub_dependant.cache_key\n )\n call = sub_dependant.call\n use_sub_dependant = sub_dependant\n if (\n dependency_overrides_provider\n and dependency_overrides_provider.dependency_overrides\n ):\n original_call = sub_dependant.call\n call = getattr(\n dependency_overrides_provider, \"dependency_overrides\", {}\n ).get(original_call, original_call)\n use_path: str = sub_dependant.path # type: ignore\n use_sub_dependant = get_dependant(\n path=use_path,\n call=call,\n name=sub_dependant.name,\n security_scopes=sub_dependant.security_scopes,\n )\n\n solved_result = await solve_dependencies(\n request=request,\n dependant=use_sub_dependant,\n body=body,\n background_tasks=background_tasks,\n response=response,\n dependency_overrides_provider=dependency_overrides_provider,\n dependency_cache=dependency_cache,\n )\n (\n sub_values,\n sub_errors,\n background_tasks,\n sub_response,\n sub_dependency_cache,\n ) = solved_result\n sub_response = cast(Response, sub_response)\n response.headers.raw.extend(sub_response.headers.raw)\n if sub_response.status_code:\n response.status_code = sub_response.status_code\n dependency_cache.update(sub_dependency_cache)\n if sub_errors:\n errors.extend(sub_errors)\n continue\n if sub_dependant.use_cache and sub_dependant.cache_key in dependency_cache:\n solved = dependency_cache[sub_dependant.cache_key]\n elif inspect.isgeneratorfunction(call) or inspect.isasyncgenfunction(call):\n stack = request.scope.get(\"fastapi_astack\")\n if stack is None:\n raise 
RuntimeError(\n async_contextmanager_dependencies_error\n ) # pragma: no cover\n solved = await solve_generator(\n call=call, stack=stack, sub_values=sub_values\n )\n elif is_coroutine_callable(call):\n solved = await call(**sub_values)\n else:\n solved = await run_in_threadpool(call, **sub_values)\n if sub_dependant.name is not None:\n values[sub_dependant.name] = solved\n if sub_dependant.cache_key not in dependency_cache:\n dependency_cache[sub_dependant.cache_key] = solved\n path_values, path_errors = request_params_to_args(\n dependant.path_params, request.path_params\n )\n query_values, query_errors = request_params_to_args(\n dependant.query_params, request.query_params\n )\n header_values, header_errors = request_params_to_args(\n dependant.header_params, request.headers\n )\n cookie_values, cookie_errors = request_params_to_args(\n dependant.cookie_params, request.cookies\n )\n values.update(path_values)\n values.update(query_values)\n values.update(header_values)\n values.update(cookie_values)\n errors += path_errors + query_errors + header_errors + cookie_errors\n if dependant.body_params:\n (\n body_values,\n body_errors,\n ) = await request_body_to_args( # body_params checked above\n required_params=dependant.body_params, received_body=body\n )\n values.update(body_values)\n errors.extend(body_errors)\n if dependant.request_param_name and isinstance(request, Request):\n values[dependant.request_param_name] = request\n elif dependant.websocket_param_name and isinstance(request, WebSocket):\n values[dependant.websocket_param_name] = request\n if dependant.background_tasks_param_name:\n if background_tasks is None:\n background_tasks = BackgroundTasks()\n values[dependant.background_tasks_param_name] = background_tasks\n if dependant.response_param_name:\n values[dependant.response_param_name] = response\n if dependant.security_scopes_param_name:\n values[dependant.security_scopes_param_name] = SecurityScopes(\n scopes=dependant.security_scopes\n )\n return values, errors, background_tasks, response, dependency_cache\n\n\ndef request_params_to_args(\n required_params: Sequence[Field],\n received_params: Union[Mapping[str, Any], QueryParams, Headers],\n) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:\n values = {}\n errors = []\n for field in required_params:\n if is_scalar_sequence_field(field) and isinstance(\n received_params, (QueryParams, Headers)\n ):\n value = received_params.getlist(field.alias) or field.default\n else:\n value = received_params.get(field.alias)\n schema = field.schema\n assert isinstance(schema, params.Param), \"Params must be subclasses of Param\"\n if value is None:\n if field.required:\n errors.append(\n ErrorWrapper(\n MissingError(),\n loc=(schema.in_.value, field.alias),\n config=BaseConfig,\n )\n )\n else:\n values[field.name] = deepcopy(field.default)\n continue\n v_, errors_ = field.validate(value, values, loc=(schema.in_.value, field.alias))\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[field.name] = v_\n return values, errors\n\n\nasync def request_body_to_args(\n required_params: List[Field],\n received_body: Optional[Union[Dict[str, Any], FormData]],\n) -> Tuple[Dict[str, Any], List[ErrorWrapper]]:\n values = {}\n errors = []\n if required_params:\n field = required_params[0]\n embed = getattr(field.schema, \"embed\", None)\n if len(required_params) == 1 and not embed:\n received_body = {field.alias: received_body}\n for field in required_params:\n 
value: Any = None\n if received_body is not None:\n if field.shape in sequence_shapes and isinstance(\n received_body, FormData\n ):\n value = received_body.getlist(field.alias)\n else:\n value = received_body.get(field.alias)\n if (\n value is None\n or (isinstance(field.schema, params.Form) and value == \"\")\n or (\n isinstance(field.schema, params.Form)\n and field.shape in sequence_shapes\n and len(value) == 0\n )\n ):\n if field.required:\n errors.append(\n ErrorWrapper(\n MissingError(), loc=(\"body\", field.alias), config=BaseConfig\n )\n )\n else:\n values[field.name] = deepcopy(field.default)\n continue\n if (\n isinstance(field.schema, params.File)\n and lenient_issubclass(field.type_, bytes)\n and isinstance(value, UploadFile)\n ):\n value = await value.read()\n elif (\n field.shape in sequence_shapes\n and isinstance(field.schema, params.File)\n and lenient_issubclass(field.type_, bytes)\n and isinstance(value, sequence_types)\n ):\n awaitables = [sub_value.read() for sub_value in value]\n contents = await asyncio.gather(*awaitables)\n value = sequence_shape_to_type[field.shape](contents)\n v_, errors_ = field.validate(value, values, loc=(\"body\", field.alias))\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[field.name] = v_\n return values, errors\n\n\ndef get_schema_compatible_field(*, field: Field) -> Field:\n out_field = field\n if lenient_issubclass(field.type_, UploadFile):\n use_type: type = bytes\n if field.shape in sequence_shapes:\n use_type = List[bytes]\n out_field = Field(\n name=field.name,\n type_=use_type,\n class_validators=field.class_validators,\n model_config=field.model_config,\n default=field.default,\n required=field.required,\n alias=field.alias,\n schema=field.schema,\n )\n return out_field\n\n\ndef get_body_field(*, dependant: Dependant, name: str) -> Optional[Field]:\n flat_dependant = get_flat_dependant(dependant)\n if not flat_dependant.body_params:\n return None\n first_param = flat_dependant.body_params[0]\n embed = getattr(first_param.schema, \"embed\", None)\n if len(flat_dependant.body_params) == 1 and not embed:\n return get_schema_compatible_field(field=first_param)\n model_name = \"Body_\" + name\n BodyModel = create_model(model_name)\n for f in flat_dependant.body_params:\n BodyModel.__fields__[f.name] = get_schema_compatible_field(field=f)\n required = any(True for f in flat_dependant.body_params if f.required)\n\n BodySchema_kwargs: Dict[str, Any] = dict(default=None)\n if any(isinstance(f.schema, params.File) for f in flat_dependant.body_params):\n BodySchema: Type[params.Body] = params.File\n elif any(isinstance(f.schema, params.Form) for f in flat_dependant.body_params):\n BodySchema = params.Form\n else:\n BodySchema = params.Body\n\n body_param_media_types = [\n getattr(f.schema, \"media_type\")\n for f in flat_dependant.body_params\n if isinstance(f.schema, params.Body)\n ]\n if len(set(body_param_media_types)) == 1:\n BodySchema_kwargs[\"media_type\"] = body_param_media_types[0]\n\n field = Field(\n name=\"body\",\n type_=BodyModel,\n default=None,\n required=required,\n model_config=BaseConfig,\n class_validators={},\n alias=\"body\",\n schema=BodySchema(**BodySchema_kwargs),\n )\n return field\n", "path": "fastapi/dependencies/utils.py" } ]
diff --git a/fastapi/dependencies/utils.py b/fastapi/dependencies/utils.py index 4745f173f0d6e..2cda78a9e9a6f 100644 --- a/fastapi/dependencies/utils.py +++ b/fastapi/dependencies/utils.py @@ -351,7 +351,7 @@ def add_param_to_fields(*, field: Field, dependant: Dependant) -> None: def is_coroutine_callable(call: Callable) -> bool: - if inspect.isfunction(call): + if inspect.isroutine(call): return asyncio.iscoroutinefunction(call) if inspect.isclass(call): return False diff --git a/tests/test_dependency_class.py b/tests/test_dependency_class.py new file mode 100644 index 0000000000000..db1f5cc8fe70a --- /dev/null +++ b/tests/test_dependency_class.py @@ -0,0 +1,70 @@ +import pytest +from fastapi import Depends, FastAPI +from starlette.testclient import TestClient + +app = FastAPI() + + +class CallableDependency: + def __call__(self, value: str) -> str: + return value + + +class AsyncCallableDependency: + async def __call__(self, value: str) -> str: + return value + + +class MethodsDependency: + def synchronous(self, value: str) -> str: + return value + + async def asynchronous(self, value: str) -> str: + return value + + +callable_dependency = CallableDependency() +async_callable_dependency = AsyncCallableDependency() +methods_dependency = MethodsDependency() + + [email protected]("/callable-dependency") +async def get_callable_dependency(value: str = Depends(callable_dependency)): + return value + + [email protected]("/async-callable-dependency") +async def get_callable_dependency(value: str = Depends(async_callable_dependency)): + return value + + [email protected]("/synchronous-method-dependency") +async def get_synchronous_method_dependency( + value: str = Depends(methods_dependency.synchronous), +): + return value + + [email protected]("/asynchronous-method-dependency") +async def get_asynchronous_method_dependency( + value: str = Depends(methods_dependency.asynchronous), +): + return value + + +client = TestClient(app) + + [email protected]( + "route,value", + [ + ("/callable-dependency", "callable-dependency"), + ("/async-callable-dependency", "async-callable-dependency"), + ("/synchronous-method-dependency", "synchronous-method-dependency"), + ("/asynchronous-method-dependency", "asynchronous-method-dependency"), + ], +) +def test_class_dependency(route, value): + response = client.get(route, params={"value": value}) + assert response.status_code == 200 + assert response.json() == value
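For context on the one-line change in `is_coroutine_callable` above: a bound method is not a "function" in `inspect`'s terms, but it is a "routine", so only the `isroutine` check lets `asyncio.iscoroutinefunction` classify the method-based dependencies added in the new tests correctly. A small standalone illustration (plain Python, no FastAPI required; `MethodsDependency` is borrowed from the test file above):

```python
import asyncio
import inspect


class MethodsDependency:
    def synchronous(self, value: str) -> str:
        return value

    async def asynchronous(self, value: str) -> str:
        return value


dep = MethodsDependency()

# A bound method is not a plain function, but it is a routine.
assert not inspect.isfunction(dep.synchronous)
assert inspect.isroutine(dep.synchronous)

# Once the isroutine() branch is taken, the coroutine check gives the
# right answer for both the sync and the async method.
assert asyncio.iscoroutinefunction(dep.asynchronous)
assert not asyncio.iscoroutinefunction(dep.synchronous)
```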
kivy__python-for-android-1163
OpenSSL recipe crashes on x86 arch
p4a branch: stable
buildozer: 0.33
bootstrap: sdl2
kivy: 1.10.0

The error message I get:

```
arm_arch.h:46:6: error: #error "unsupported ARM architecture"
```
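The `arm_arch.h` error is what OpenSSL emits when it is configured with an ARM target on a non-ARM build. The recipe chooses the Configure target per architecture in `select_build_arch` (see the recipe source and the diff below); without an x86 branch, x86 falls through to the ARM `linux-armv4` target. A minimal sketch of the missing branch, mirroring the fix in the diff further down (`android-x86` is the Configure target name assumed here):

```python
def select_build_arch(self, arch):
    aname = arch.arch
    if 'arm64' in aname:
        return 'linux-aarch64'
    if 'v7a' in aname:
        return 'android-armv7'
    if 'arm' in aname:
        return 'android'
    if 'x86' in aname:
        # Without this branch, x86 builds fell through to the ARM target
        # below and failed in arm_arch.h with "unsupported ARM architecture".
        return 'android-x86'
    return 'linux-armv4'
```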
[ { "content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py" } ]
[ { "content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n if 'x86' in aname:\n return 'android-x86'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py" } ]
diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py index 5be1cdd445..355e6f539d 100644 --- a/pythonforandroid/recipes/openssl/__init__.py +++ b/pythonforandroid/recipes/openssl/__init__.py @@ -36,6 +36,8 @@ def select_build_arch(self, arch): return 'android-armv7' if 'arm' in aname: return 'android' + if 'x86' in aname: + return 'android-x86' return 'linux-armv4' def build_arch(self, arch):
WordPress__openverse-api-556
Sound category mismatch
## Description
<!-- Concisely describe the bug. -->
The `sound` category for audio doesn't work on the front-end. There seems to be a mismatch between the front-end's `sound` category and the API's `sound_effect` category:
If you go to `https://api.openverse.engineering/v1/audio/?q=cat&categories=sound`, you will get a 400 response:

```
HTTP 400 Bad Request
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "detail": {
        "categories": [
            "Invalid category: sound. Available options: {'music', 'audiobook', 'podcast', 'news', 'sound_effect'}"
        ]
    }
}
```

However, if you access a single audio result, you will see that it returns `sound` for the category: https://api.openverse.engineering/v1/audio/1bb94f50-009c-4371-a605-dd289562a9f5/

## Expectation
<!-- Concisely describe what you expected to happen. -->
Both the query category parameter and the result category property for sound effects should have the same name.

## Additional context
The catalog sets the category as `sound`, so that is the value we get from the database: https://github.com/WordPress/openverse-catalog/blob/cb19f839e96de7ae1a55e8b7dc82a7d2bf5588e8/openverse_catalog/dags/providers/provider_api_scripts/freesound.py#L33-L34

## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
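One way to reconcile the two names at the API layer (not necessarily how the project resolved it) would be to accept `sound` from the query string and map it onto the validated `sound_effect` value inside `AudioSearchRequestSerializer.validate_categories`, shown in the serializer source below. This is a purely hypothetical sketch; the `aliases` table is invented for illustration:

```python
@staticmethod
def validate_categories(value):
    valid_categories = {
        "music",
        "sound_effect",
        "podcast",
        "news",
        "audiobook",
    }
    # Hypothetical alias table so the front-end's `sound` keeps working
    # while the canonical value stays `sound_effect`.
    aliases = {"sound": "sound_effect"}
    normalized = ",".join(
        aliases.get(category, category) for category in value.lower().split(",")
    )
    _validate_enum("category", valid_categories, normalized)
    return normalized
```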
[ { "content": "from catalog.api.controllers.search_controller import get_sources\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import AudioReport\nfrom catalog.api.models.audio import Audio\nfrom catalog.api.serializers.media_serializers import (\n MediaSearchRequestSerializer,\n MediaSearchSerializer,\n MediaSerializer,\n _validate_enum,\n)\nfrom elasticsearch_dsl.response import Hit\nfrom rest_framework import serializers\n\n\nclass AudioSetSerializer(serializers.Serializer):\n \"\"\"An audio set, rendered as a part of the ``AudioSerializer`` output.\"\"\"\n\n title = serializers.CharField(help_text=\"The name of the media.\", required=False)\n foreign_landing_url = serializers.URLField(\n required=False, help_text=\"A foreign landing link for the image.\"\n )\n\n creator = serializers.CharField(\n help_text=\"The name of the media creator.\", required=False, allow_blank=True\n )\n creator_url = serializers.URLField(\n required=False, help_text=\"A direct link to the media creator.\"\n )\n\n url = serializers.URLField(help_text=\"The actual URL to the media file.\")\n filesize = serializers.CharField(\n required=False, help_text=\"Number in bytes, e.g. 1024.\"\n )\n filetype = serializers.CharField(\n required=False,\n help_text=\"The type of the file, related to the file extension.\",\n )\n\n\nclass AudioSearchRequestSerializer(MediaSearchRequestSerializer):\n \"\"\"Parse and validate search query string parameters.\"\"\"\n\n fields_names = [\n *MediaSearchRequestSerializer.fields_names,\n \"source\",\n \"categories\",\n \"duration\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n source = serializers.CharField(\n label=\"provider\",\n help_text=\"A comma separated list of data sources to search. Valid \"\n \"inputs: \"\n f\"`{list(get_sources('audio').keys())}`\",\n required=False,\n )\n categories = serializers.CharField(\n label=\"categories\",\n help_text=\"A comma separated list of categories; available categories \"\n \"include `music`, `sound_effect`, `podcast`, `audiobook`, \"\n \"and `news`.\",\n required=False,\n )\n duration = serializers.CharField(\n label=\"duration\",\n help_text=\"A comma separated list of audio lengths; available lengths \"\n \"include `short`, and `long`.\",\n required=False,\n )\n\n @staticmethod\n def validate_source(input_sources):\n allowed_sources = list(get_sources(\"audio\").keys())\n input_sources = input_sources.split(\",\")\n input_sources = [x for x in input_sources if x in allowed_sources]\n input_sources = \",\".join(input_sources)\n return input_sources.lower()\n\n @staticmethod\n def validate_categories(value):\n valid_categories = {\n \"music\",\n \"sound_effect\",\n \"podcast\",\n \"news\",\n \"audiobook\",\n }\n _validate_enum(\"category\", valid_categories, value)\n return value.lower()\n\n @staticmethod\n def validate_duration(value):\n valid_durations = {\"short\", \"long\"} # TODO: Finalise duration filters\n _validate_enum(\"duration\", valid_durations, value)\n return value.lower()\n\n\nclass AudioSerializer(MediaSerializer):\n \"\"\"A single audio file. 
Used in search results.\"\"\"\n\n fields_names = [\n *MediaSerializer.fields_names,\n \"audio_set\",\n \"genre\",\n \"duration\",\n \"bit_rate\",\n \"sample_rate\",\n \"alt_files\",\n \"detail_url\",\n \"related_url\",\n \"category\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n audio_set = AudioSetSerializer(\n required=False,\n help_text=\"Reference to set of which this track is a part.\",\n read_only=True,\n )\n\n genres = serializers.ListField(\n child=serializers.CharField(),\n required=False,\n help_text=\"An array of audio genres such as \"\n \"`rock`, `electronic` for `music` category, or \"\n \"`politics`, `sport`, `education` for `podcast` category\",\n )\n\n duration = serializers.IntegerField(\n required=False, help_text=\"The time length of the audio file in milliseconds.\"\n )\n bit_rate = serializers.IntegerField(\n required=False, help_text=\"Number in bits per second, eg. 128000.\"\n )\n sample_rate = serializers.IntegerField(\n required=False, help_text=\"Number in hertz, eg. 44100.\"\n )\n\n alt_files = serializers.JSONField(\n required=False, help_text=\"JSON describing alternative files for this audio.\"\n )\n\n # Hyperlinks\n thumbnail = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-thumb\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the miniature artwork.\",\n )\n waveform = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-waveform\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the waveform peaks.\",\n )\n detail_url = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-detail\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the detail view of this audio file.\",\n )\n related_url = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-related\",\n lookup_field=\"identifier\",\n help_text=\"A link to an endpoint that provides similar audio files.\",\n )\n\n # Add-on data\n peaks = serializers.SerializerMethodField()\n\n @staticmethod\n def get_peaks(obj):\n if isinstance(obj, Hit):\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n\n\nclass AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n The full audio search response.\n This serializer is purely representational and not actually used to\n serialize the response.\n \"\"\"\n\n results = AudioSerializer(\n many=True,\n help_text=(\n \"An array of audios and their details such as \"\n f\"{fields_to_md(AudioSerializer.fields_names)}.\"\n ),\n )\n\n\nclass AudioReportSerializer(serializers.ModelSerializer):\n class Meta:\n model = AudioReport\n fields = (\"identifier\", \"reason\", \"description\")\n read_only_fields = (\"identifier\",)\n\n def create(self, validated_data):\n if (\n validated_data[\"reason\"] == \"other\"\n and (\n \"description\" not in validated_data\n or len(validated_data[\"description\"])\n )\n < 20\n ):\n raise serializers.ValidationError(\n \"Description must be at least be 20 characters long\"\n )\n return AudioReport.objects.create(**validated_data)\n\n\nclass AudioWaveformSerializer(serializers.Serializer):\n len = serializers.SerializerMethodField()\n points = serializers.ListField(\n child=serializers.FloatField(min_value=0, max_value=1)\n )\n\n @staticmethod\n def get_len(obj) -> int:\n return len(obj.get(\"points\", []))\n", "path": "api/catalog/api/serializers/audio_serializers.py" } ]
[ { "content": "from catalog.api.controllers.search_controller import get_sources\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import AudioReport\nfrom catalog.api.models.audio import Audio\nfrom catalog.api.serializers.media_serializers import (\n MediaSearchRequestSerializer,\n MediaSearchSerializer,\n MediaSerializer,\n _validate_enum,\n)\nfrom elasticsearch_dsl.response import Hit\nfrom rest_framework import serializers\n\n\nclass AudioSetSerializer(serializers.Serializer):\n \"\"\"An audio set, rendered as a part of the ``AudioSerializer`` output.\"\"\"\n\n title = serializers.CharField(help_text=\"The name of the media.\", required=False)\n foreign_landing_url = serializers.URLField(\n required=False, help_text=\"A foreign landing link for the image.\"\n )\n\n creator = serializers.CharField(\n help_text=\"The name of the media creator.\", required=False, allow_blank=True\n )\n creator_url = serializers.URLField(\n required=False, help_text=\"A direct link to the media creator.\"\n )\n\n url = serializers.URLField(help_text=\"The actual URL to the media file.\")\n filesize = serializers.CharField(\n required=False, help_text=\"Number in bytes, e.g. 1024.\"\n )\n filetype = serializers.CharField(\n required=False,\n help_text=\"The type of the file, related to the file extension.\",\n )\n\n\nclass AudioSearchRequestSerializer(MediaSearchRequestSerializer):\n \"\"\"Parse and validate search query string parameters.\"\"\"\n\n fields_names = [\n *MediaSearchRequestSerializer.fields_names,\n \"source\",\n \"categories\",\n \"duration\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n source = serializers.CharField(\n label=\"provider\",\n help_text=\"A comma separated list of data sources to search. Valid \"\n \"inputs: \"\n f\"`{list(get_sources('audio').keys())}`\",\n required=False,\n )\n categories = serializers.CharField(\n label=\"categories\",\n help_text=\"A comma separated list of categories; available categories \"\n \"include `music`, `sound_effect`, `podcast`, `audiobook`, \"\n \"and `news`.\",\n required=False,\n )\n duration = serializers.CharField(\n label=\"duration\",\n help_text=\"A comma separated list of audio lengths; available lengths \"\n \"include `short`, and `long`.\",\n required=False,\n )\n\n @staticmethod\n def validate_source(input_sources):\n allowed_sources = list(get_sources(\"audio\").keys())\n input_sources = input_sources.split(\",\")\n input_sources = [x for x in input_sources if x in allowed_sources]\n input_sources = \",\".join(input_sources)\n return input_sources.lower()\n\n @staticmethod\n def validate_categories(value):\n valid_categories = {\n \"music\",\n \"sound_effect\",\n \"podcast\",\n \"news\",\n \"audiobook\",\n \"pronunciation\",\n }\n _validate_enum(\"category\", valid_categories, value)\n return value.lower()\n\n @staticmethod\n def validate_duration(value):\n valid_durations = {\"short\", \"long\"} # TODO: Finalise duration filters\n _validate_enum(\"duration\", valid_durations, value)\n return value.lower()\n\n\nclass AudioSerializer(MediaSerializer):\n \"\"\"A single audio file. 
Used in search results.\"\"\"\n\n fields_names = [\n *MediaSerializer.fields_names,\n \"audio_set\",\n \"genre\",\n \"duration\",\n \"bit_rate\",\n \"sample_rate\",\n \"alt_files\",\n \"detail_url\",\n \"related_url\",\n \"category\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n audio_set = AudioSetSerializer(\n required=False,\n help_text=\"Reference to set of which this track is a part.\",\n read_only=True,\n )\n\n genres = serializers.ListField(\n child=serializers.CharField(),\n required=False,\n help_text=\"An array of audio genres such as \"\n \"`rock`, `electronic` for `music` category, or \"\n \"`politics`, `sport`, `education` for `podcast` category\",\n )\n\n duration = serializers.IntegerField(\n required=False, help_text=\"The time length of the audio file in milliseconds.\"\n )\n bit_rate = serializers.IntegerField(\n required=False, help_text=\"Number in bits per second, eg. 128000.\"\n )\n sample_rate = serializers.IntegerField(\n required=False, help_text=\"Number in hertz, eg. 44100.\"\n )\n\n alt_files = serializers.JSONField(\n required=False, help_text=\"JSON describing alternative files for this audio.\"\n )\n\n # Hyperlinks\n thumbnail = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-thumb\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the miniature artwork.\",\n )\n waveform = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-waveform\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the waveform peaks.\",\n )\n detail_url = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-detail\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the detail view of this audio file.\",\n )\n related_url = serializers.HyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-related\",\n lookup_field=\"identifier\",\n help_text=\"A link to an endpoint that provides similar audio files.\",\n )\n\n # Add-on data\n peaks = serializers.SerializerMethodField()\n\n @staticmethod\n def get_peaks(obj):\n if isinstance(obj, Hit):\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n\n\nclass AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n The full audio search response.\n This serializer is purely representational and not actually used to\n serialize the response.\n \"\"\"\n\n results = AudioSerializer(\n many=True,\n help_text=(\n \"An array of audios and their details such as \"\n f\"{fields_to_md(AudioSerializer.fields_names)}.\"\n ),\n )\n\n\nclass AudioReportSerializer(serializers.ModelSerializer):\n class Meta:\n model = AudioReport\n fields = (\"identifier\", \"reason\", \"description\")\n read_only_fields = (\"identifier\",)\n\n def create(self, validated_data):\n if (\n validated_data[\"reason\"] == \"other\"\n and (\n \"description\" not in validated_data\n or len(validated_data[\"description\"])\n )\n < 20\n ):\n raise serializers.ValidationError(\n \"Description must be at least be 20 characters long\"\n )\n return AudioReport.objects.create(**validated_data)\n\n\nclass AudioWaveformSerializer(serializers.Serializer):\n len = serializers.SerializerMethodField()\n points = serializers.ListField(\n child=serializers.FloatField(min_value=0, max_value=1)\n )\n\n @staticmethod\n def get_len(obj) -> int:\n return len(obj.get(\"points\", []))\n", "path": "api/catalog/api/serializers/audio_serializers.py" } ]
diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py index 26525c079..de8b0c25b 100644 --- a/api/catalog/api/serializers/audio_serializers.py +++ b/api/catalog/api/serializers/audio_serializers.py @@ -88,6 +88,7 @@ def validate_categories(value): "podcast", "news", "audiobook", + "pronunciation", } _validate_enum("category", valid_categories, value) return value.lower()
google__clusterfuzz-1169
_update_issue_metadata in progression task fails on OSS-Fuzz
This is due to the use of the untrusted runner on OSS-Fuzz. Is this even needed there? It is currently causing exceptions. Should this be a simple bailout?
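The simple bailout suggested here is what the updated `progression_task.py` below does: `_update_issue_metadata` returns early on a trusted host, presumably because in the untrusted-runner setup there is no local build whose fuzz target could be inspected. The essential change is just the guard at the top of the function:

```python
def _update_issue_metadata(testcase):
  """Update issue metadata."""
  if environment.is_trusted_host():
    # Not applicable: work is delegated to the untrusted runner, so there
    # is no local build to read issue metadata from.
    return

  # ... the rest of the function (fuzz target lookup, metadata copy) is
  # unchanged.
```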
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test to see if test cases are fixed.\"\"\"\n\nimport time\n\nfrom base import errors\nfrom base import tasks\nfrom base import utils\nfrom bot import testcase_manager\nfrom bot.fuzzers import engine_common\nfrom bot.tasks import setup\nfrom bot.tasks import task_creation\nfrom build_management import build_manager\nfrom build_management import revisions\nfrom chrome import crash_uploader\nfrom datastore import data_handler\nfrom datastore import data_types\nfrom google_cloud_utils import big_query\nfrom metrics import logs\nfrom system import environment\n\n\ndef _write_to_bigquery(testcase, progression_range_start,\n progression_range_end):\n \"\"\"Write the fixed range to BigQuery.\"\"\"\n big_query.write_range(\n table_id='fixeds',\n testcase=testcase,\n range_name='fixed',\n start=progression_range_start,\n end=progression_range_end)\n\n\ndef _clear_progression_pending(testcase):\n \"\"\"If we marked progression as pending for this testcase, clear that state.\"\"\"\n if not testcase.get_metadata('progression_pending'):\n return\n\n testcase.delete_metadata('progression_pending', update_testcase=False)\n\n\ndef _update_completion_metadata(testcase,\n revision,\n is_crash=False,\n message=None):\n \"\"\"Update metadata the progression task completes.\"\"\"\n _clear_progression_pending(testcase)\n testcase.set_metadata('last_tested_revision', revision, update_testcase=False)\n if is_crash:\n testcase.set_metadata(\n 'last_tested_crash_revision', revision, update_testcase=False)\n testcase.set_metadata(\n 'last_tested_crash_time', utils.utcnow(), update_testcase=False)\n if not testcase.open:\n testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,\n message)\n\n\ndef _log_output(revision, crash_result):\n \"\"\"Log process output.\"\"\"\n logs.log(\n 'Testing %s.' 
% revision,\n revision=revision,\n output=crash_result.get_stacktrace(symbolized=True))\n\n\ndef _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):\n \"\"\"Simplified fixed check for test cases using custom binaries.\"\"\"\n revision = environment.get_value('APP_REVISION')\n\n # Update comments to reflect bot information and clean up old comments.\n testcase_id = testcase.key.id()\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)\n\n build_manager.setup_build()\n if not build_manager.check_app_path():\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(\n testcase, data_types.TaskState.ERROR,\n 'Build setup failed for custom binary')\n build_fail_wait = environment.get_value('FAIL_WAIT')\n tasks.add_task(\n 'progression', testcase_id, job_type, wait_time=build_fail_wait)\n return\n\n test_timeout = environment.get_value('TEST_TIMEOUT', 10)\n result = testcase_manager.test_for_crash_with_retries(\n testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)\n _log_output(revision, result)\n\n # Re-fetch to finalize testcase updates in branches below.\n testcase = data_handler.get_testcase_by_id(testcase.key.id())\n\n # If this still crashes on the most recent build, it's not fixed. The task\n # will be rescheduled by a cron job and re-attempted eventually.\n if result.is_crash():\n app_path = environment.get_value('APP_PATH')\n command = testcase_manager.get_command_line_for_application(\n testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)\n symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)\n unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)\n stacktrace = utils.get_crash_stacktrace_output(\n command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)\n testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(\n stacktrace)\n _update_completion_metadata(\n testcase,\n revision,\n is_crash=True,\n message='still crashes on latest custom build')\n return\n\n # Retry once on another bot to confirm our results and in case this bot is in\n # a bad state which we didn't catch through our usual means.\n if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):\n tasks.add_task('progression', testcase_id, job_type)\n _update_completion_metadata(testcase, revision)\n return\n\n # The bug is fixed.\n testcase.fixed = 'Yes'\n testcase.open = False\n _update_completion_metadata(\n testcase, revision, message='fixed on latest custom build')\n\n\ndef _update_issue_metadata(testcase):\n \"\"\"Update issue metadata.\"\"\"\n fuzz_target = testcase.get_fuzz_target()\n if not fuzz_target:\n return\n\n build_dir = environment.get_value('BUILD_DIR')\n target_path = engine_common.find_fuzzer_path(build_dir, fuzz_target.binary)\n if not target_path:\n logs.log_error('Failed to find target path for ' + fuzz_target.binary)\n return\n\n metadata = engine_common.get_all_issue_metadata(target_path)\n for key, value in metadata.items():\n old_value = testcase.get_metadata(key)\n if old_value != value:\n logs.log('Updating issue metadata for {} from {} to {}.'.format(\n key, old_value, value))\n testcase.set_metadata(key, value)\n\n\ndef _testcase_reproduces_in_revision(testcase,\n testcase_file_path,\n job_type,\n revision,\n update_metadata=False):\n \"\"\"Test to see if a test case reproduces in the specified revision.\"\"\"\n 
build_manager.setup_build(revision)\n if not build_manager.check_app_path():\n raise errors.BuildSetupError(revision, job_type)\n\n if testcase_manager.check_for_bad_build(job_type, revision):\n log_message = 'Bad build at r%d. Skipping' % revision\n testcase = data_handler.get_testcase_by_id(testcase.key.id())\n data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,\n log_message)\n raise errors.BadBuildError(revision, job_type)\n\n test_timeout = environment.get_value('TEST_TIMEOUT', 10)\n result = testcase_manager.test_for_crash_with_retries(\n testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)\n _log_output(revision, result)\n\n if update_metadata:\n _update_issue_metadata(testcase)\n\n return result\n\n\ndef _save_current_fixed_range_indices(testcase_id, fixed_range_start,\n fixed_range_end):\n \"\"\"Save current fixed range indices in case we die in middle of task.\"\"\"\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.set_metadata(\n 'last_progression_min', fixed_range_start, update_testcase=False)\n testcase.set_metadata(\n 'last_progression_max', fixed_range_end, update_testcase=False)\n testcase.put()\n\n\ndef _save_fixed_range(testcase_id, min_revision, max_revision):\n \"\"\"Update a test case and other metadata with a fixed range.\"\"\"\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.fixed = '%d:%d' % (min_revision, max_revision)\n testcase.open = False\n\n _update_completion_metadata(\n testcase, max_revision, message='fixed in range r%s' % testcase.fixed)\n _write_to_bigquery(testcase, min_revision, max_revision)\n\n\ndef find_fixed_range(testcase_id, job_type):\n \"\"\"Attempt to find the revision range where a testcase was fixed.\"\"\"\n deadline = tasks.get_task_completion_deadline()\n testcase = data_handler.get_testcase_by_id(testcase_id)\n if not testcase:\n return\n\n if testcase.fixed:\n logs.log_error('Fixed range is already set as %s, skip.' % testcase.fixed)\n return\n\n # Setup testcase and its dependencies.\n file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)\n if not file_list:\n return\n\n # Set a flag to indicate we are running progression task. This shows pending\n # status on testcase report page and avoid conflicting testcase updates by\n # triage cron.\n testcase.set_metadata('progression_pending', True)\n\n # Custom binaries are handled as special cases.\n if build_manager.is_custom_binary():\n _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)\n return\n\n build_bucket_path = build_manager.get_primary_bucket_path()\n revision_list = build_manager.get_revisions_list(\n build_bucket_path, testcase=testcase)\n if not revision_list:\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n 'Failed to fetch revision list')\n tasks.add_task('progression', testcase_id, job_type)\n return\n\n # Use min, max_index to mark the start and end of revision list that is used\n # for bisecting the progression range. Set start to the revision where noticed\n # the crash. Set end to the trunk revision. 
Also, use min, max from past run\n # if it timed out.\n min_revision = testcase.get_metadata('last_progression_min')\n max_revision = testcase.get_metadata('last_progression_max')\n last_tested_revision = testcase.get_metadata('last_tested_crash_revision')\n known_crash_revision = last_tested_revision or testcase.crash_revision\n if not min_revision:\n min_revision = known_crash_revision\n if not max_revision:\n max_revision = revisions.get_last_revision_in_list(revision_list)\n\n min_index = revisions.find_min_revision_index(revision_list, min_revision)\n if min_index is None:\n raise errors.BuildNotFoundError(min_revision, job_type)\n max_index = revisions.find_max_revision_index(revision_list, max_revision)\n if max_index is None:\n raise errors.BuildNotFoundError(max_revision, job_type)\n\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED,\n 'r%d' % max_revision)\n\n # Check to see if this testcase is still crashing now. If it is, then just\n # bail out.\n result = _testcase_reproduces_in_revision(\n testcase,\n testcase_file_path,\n job_type,\n max_revision,\n update_metadata=True)\n if result.is_crash():\n logs.log('Found crash with same signature on latest revision r%d.' %\n max_revision)\n app_path = environment.get_value('APP_PATH')\n command = testcase_manager.get_command_line_for_application(\n testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)\n symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)\n unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)\n stacktrace = utils.get_crash_stacktrace_output(\n command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(\n stacktrace)\n _update_completion_metadata(\n testcase,\n max_revision,\n is_crash=True,\n message='still crashes on latest revision r%s' % max_revision)\n\n # Since we've verified that the test case is still crashing, clear out any\n # metadata indicating potential flake from previous runs.\n task_creation.mark_unreproducible_if_flaky(testcase, False)\n\n # For chromium project, save latest crash information for later upload\n # to chromecrash/.\n state = result.get_symbolized_data()\n crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,\n job_type, state.crash_type,\n state.crash_address, state.frames)\n return\n\n # Don't burden NFS server with caching these random builds.\n environment.set_value('CACHE_STORE', False)\n\n # Verify that we do crash in the min revision. 
This is assumed to be true\n # while we are doing the bisect.\n result = _testcase_reproduces_in_revision(testcase, testcase_file_path,\n job_type, min_revision)\n if result and not result.is_crash():\n testcase = data_handler.get_testcase_by_id(testcase_id)\n\n # Retry once on another bot to confirm our result.\n if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):\n tasks.add_task('progression', testcase_id, job_type)\n error_message = (\n 'Known crash revision %d did not crash, will retry on another bot to '\n 'confirm result' % known_crash_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n _update_completion_metadata(testcase, max_revision)\n return\n\n _clear_progression_pending(testcase)\n error_message = (\n 'Known crash revision %d did not crash' % known_crash_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n task_creation.mark_unreproducible_if_flaky(testcase, True)\n return\n\n # Start a binary search to find last non-crashing revision. At this point, we\n # know that we do crash in the min_revision, and do not crash in max_revision.\n while time.time() < deadline:\n min_revision = revision_list[min_index]\n max_revision = revision_list[max_index]\n\n # If the min and max revisions are one apart this is as much as we can\n # narrow the range.\n if max_index - min_index == 1:\n _save_fixed_range(testcase_id, min_revision, max_revision)\n return\n\n # Test the middle revision of our range.\n middle_index = (min_index + max_index) // 2\n middle_revision = revision_list[middle_index]\n\n testcase = data_handler.get_testcase_by_id(testcase_id)\n log_message = 'Testing r%d (current range %d:%d)' % (\n middle_revision, min_revision, max_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,\n log_message)\n\n try:\n result = _testcase_reproduces_in_revision(testcase, testcase_file_path,\n job_type, middle_revision)\n except errors.BadBuildError:\n # Skip this revision.\n del revision_list[middle_index]\n max_index -= 1\n continue\n\n if result.is_crash():\n min_index = middle_index\n else:\n max_index = middle_index\n\n _save_current_fixed_range_indices(testcase_id, revision_list[min_index],\n revision_list[max_index])\n\n # If we've broken out of the loop, we've exceeded the deadline. Recreate the\n # task to pick up where we left off.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = ('Timed out, current range r%d:r%d' %\n (revision_list[min_index], revision_list[max_index]))\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n tasks.add_task('progression', testcase_id, job_type)\n\n\ndef execute_task(testcase_id, job_type):\n \"\"\"Execute progression task.\"\"\"\n try:\n find_fixed_range(testcase_id, job_type)\n except errors.BuildSetupError as error:\n # If we failed to setup a build, it is likely a bot error. We can retry\n # the task in this case.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = 'Build setup failed r%d' % error.revision\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n build_fail_wait = environment.get_value('FAIL_WAIT')\n tasks.add_task(\n 'progression', testcase_id, job_type, wait_time=build_fail_wait)\n except errors.BadBuildError:\n # Though bad builds when narrowing the range are recoverable, certain builds\n # being marked as bad may be unrecoverable. 
Recoverable ones should not\n # reach this point.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = 'Unable to recover from bad build'\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n", "path": "src/python/bot/tasks/progression_task.py" } ]
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test to see if test cases are fixed.\"\"\"\n\nimport time\n\nfrom base import errors\nfrom base import tasks\nfrom base import utils\nfrom bot import testcase_manager\nfrom bot.fuzzers import engine_common\nfrom bot.tasks import setup\nfrom bot.tasks import task_creation\nfrom build_management import build_manager\nfrom build_management import revisions\nfrom chrome import crash_uploader\nfrom datastore import data_handler\nfrom datastore import data_types\nfrom google_cloud_utils import big_query\nfrom metrics import logs\nfrom system import environment\n\n\ndef _write_to_bigquery(testcase, progression_range_start,\n progression_range_end):\n \"\"\"Write the fixed range to BigQuery.\"\"\"\n big_query.write_range(\n table_id='fixeds',\n testcase=testcase,\n range_name='fixed',\n start=progression_range_start,\n end=progression_range_end)\n\n\ndef _clear_progression_pending(testcase):\n \"\"\"If we marked progression as pending for this testcase, clear that state.\"\"\"\n if not testcase.get_metadata('progression_pending'):\n return\n\n testcase.delete_metadata('progression_pending', update_testcase=False)\n\n\ndef _update_completion_metadata(testcase,\n revision,\n is_crash=False,\n message=None):\n \"\"\"Update metadata the progression task completes.\"\"\"\n _clear_progression_pending(testcase)\n testcase.set_metadata('last_tested_revision', revision, update_testcase=False)\n if is_crash:\n testcase.set_metadata(\n 'last_tested_crash_revision', revision, update_testcase=False)\n testcase.set_metadata(\n 'last_tested_crash_time', utils.utcnow(), update_testcase=False)\n if not testcase.open:\n testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,\n message)\n\n\ndef _log_output(revision, crash_result):\n \"\"\"Log process output.\"\"\"\n logs.log(\n 'Testing %s.' 
% revision,\n revision=revision,\n output=crash_result.get_stacktrace(symbolized=True))\n\n\ndef _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):\n \"\"\"Simplified fixed check for test cases using custom binaries.\"\"\"\n revision = environment.get_value('APP_REVISION')\n\n # Update comments to reflect bot information and clean up old comments.\n testcase_id = testcase.key.id()\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)\n\n build_manager.setup_build()\n if not build_manager.check_app_path():\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(\n testcase, data_types.TaskState.ERROR,\n 'Build setup failed for custom binary')\n build_fail_wait = environment.get_value('FAIL_WAIT')\n tasks.add_task(\n 'progression', testcase_id, job_type, wait_time=build_fail_wait)\n return\n\n test_timeout = environment.get_value('TEST_TIMEOUT', 10)\n result = testcase_manager.test_for_crash_with_retries(\n testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)\n _log_output(revision, result)\n\n # Re-fetch to finalize testcase updates in branches below.\n testcase = data_handler.get_testcase_by_id(testcase.key.id())\n\n # If this still crashes on the most recent build, it's not fixed. The task\n # will be rescheduled by a cron job and re-attempted eventually.\n if result.is_crash():\n app_path = environment.get_value('APP_PATH')\n command = testcase_manager.get_command_line_for_application(\n testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)\n symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)\n unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)\n stacktrace = utils.get_crash_stacktrace_output(\n command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)\n testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(\n stacktrace)\n _update_completion_metadata(\n testcase,\n revision,\n is_crash=True,\n message='still crashes on latest custom build')\n return\n\n # Retry once on another bot to confirm our results and in case this bot is in\n # a bad state which we didn't catch through our usual means.\n if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):\n tasks.add_task('progression', testcase_id, job_type)\n _update_completion_metadata(testcase, revision)\n return\n\n # The bug is fixed.\n testcase.fixed = 'Yes'\n testcase.open = False\n _update_completion_metadata(\n testcase, revision, message='fixed on latest custom build')\n\n\ndef _update_issue_metadata(testcase):\n \"\"\"Update issue metadata.\"\"\"\n if environment.is_trusted_host():\n # Not applicable.\n return\n\n fuzz_target = testcase.get_fuzz_target()\n if not fuzz_target:\n return\n\n build_dir = environment.get_value('BUILD_DIR')\n target_path = engine_common.find_fuzzer_path(build_dir, fuzz_target.binary)\n if not target_path:\n logs.log_error('Failed to find target path for ' + fuzz_target.binary)\n return\n\n metadata = engine_common.get_all_issue_metadata(target_path)\n for key, value in metadata.items():\n old_value = testcase.get_metadata(key)\n if old_value != value:\n logs.log('Updating issue metadata for {} from {} to {}.'.format(\n key, old_value, value))\n testcase.set_metadata(key, value)\n\n\ndef _testcase_reproduces_in_revision(testcase,\n testcase_file_path,\n job_type,\n revision,\n update_metadata=False):\n \"\"\"Test to see if a test case 
reproduces in the specified revision.\"\"\"\n build_manager.setup_build(revision)\n if not build_manager.check_app_path():\n raise errors.BuildSetupError(revision, job_type)\n\n if testcase_manager.check_for_bad_build(job_type, revision):\n log_message = 'Bad build at r%d. Skipping' % revision\n testcase = data_handler.get_testcase_by_id(testcase.key.id())\n data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,\n log_message)\n raise errors.BadBuildError(revision, job_type)\n\n test_timeout = environment.get_value('TEST_TIMEOUT', 10)\n result = testcase_manager.test_for_crash_with_retries(\n testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)\n _log_output(revision, result)\n\n if update_metadata:\n _update_issue_metadata(testcase)\n\n return result\n\n\ndef _save_current_fixed_range_indices(testcase_id, fixed_range_start,\n fixed_range_end):\n \"\"\"Save current fixed range indices in case we die in middle of task.\"\"\"\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.set_metadata(\n 'last_progression_min', fixed_range_start, update_testcase=False)\n testcase.set_metadata(\n 'last_progression_max', fixed_range_end, update_testcase=False)\n testcase.put()\n\n\ndef _save_fixed_range(testcase_id, min_revision, max_revision):\n \"\"\"Update a test case and other metadata with a fixed range.\"\"\"\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.fixed = '%d:%d' % (min_revision, max_revision)\n testcase.open = False\n\n _update_completion_metadata(\n testcase, max_revision, message='fixed in range r%s' % testcase.fixed)\n _write_to_bigquery(testcase, min_revision, max_revision)\n\n\ndef find_fixed_range(testcase_id, job_type):\n \"\"\"Attempt to find the revision range where a testcase was fixed.\"\"\"\n deadline = tasks.get_task_completion_deadline()\n testcase = data_handler.get_testcase_by_id(testcase_id)\n if not testcase:\n return\n\n if testcase.fixed:\n logs.log_error('Fixed range is already set as %s, skip.' % testcase.fixed)\n return\n\n # Setup testcase and its dependencies.\n file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)\n if not file_list:\n return\n\n # Set a flag to indicate we are running progression task. This shows pending\n # status on testcase report page and avoid conflicting testcase updates by\n # triage cron.\n testcase.set_metadata('progression_pending', True)\n\n # Custom binaries are handled as special cases.\n if build_manager.is_custom_binary():\n _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)\n return\n\n build_bucket_path = build_manager.get_primary_bucket_path()\n revision_list = build_manager.get_revisions_list(\n build_bucket_path, testcase=testcase)\n if not revision_list:\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n 'Failed to fetch revision list')\n tasks.add_task('progression', testcase_id, job_type)\n return\n\n # Use min, max_index to mark the start and end of revision list that is used\n # for bisecting the progression range. Set start to the revision where noticed\n # the crash. Set end to the trunk revision. 
Also, use min, max from past run\n # if it timed out.\n min_revision = testcase.get_metadata('last_progression_min')\n max_revision = testcase.get_metadata('last_progression_max')\n last_tested_revision = testcase.get_metadata('last_tested_crash_revision')\n known_crash_revision = last_tested_revision or testcase.crash_revision\n if not min_revision:\n min_revision = known_crash_revision\n if not max_revision:\n max_revision = revisions.get_last_revision_in_list(revision_list)\n\n min_index = revisions.find_min_revision_index(revision_list, min_revision)\n if min_index is None:\n raise errors.BuildNotFoundError(min_revision, job_type)\n max_index = revisions.find_max_revision_index(revision_list, max_revision)\n if max_index is None:\n raise errors.BuildNotFoundError(max_revision, job_type)\n\n testcase = data_handler.get_testcase_by_id(testcase_id)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED,\n 'r%d' % max_revision)\n\n # Check to see if this testcase is still crashing now. If it is, then just\n # bail out.\n result = _testcase_reproduces_in_revision(\n testcase,\n testcase_file_path,\n job_type,\n max_revision,\n update_metadata=True)\n if result.is_crash():\n logs.log('Found crash with same signature on latest revision r%d.' %\n max_revision)\n app_path = environment.get_value('APP_PATH')\n command = testcase_manager.get_command_line_for_application(\n testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)\n symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)\n unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)\n stacktrace = utils.get_crash_stacktrace_output(\n command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)\n testcase = data_handler.get_testcase_by_id(testcase_id)\n testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(\n stacktrace)\n _update_completion_metadata(\n testcase,\n max_revision,\n is_crash=True,\n message='still crashes on latest revision r%s' % max_revision)\n\n # Since we've verified that the test case is still crashing, clear out any\n # metadata indicating potential flake from previous runs.\n task_creation.mark_unreproducible_if_flaky(testcase, False)\n\n # For chromium project, save latest crash information for later upload\n # to chromecrash/.\n state = result.get_symbolized_data()\n crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,\n job_type, state.crash_type,\n state.crash_address, state.frames)\n return\n\n # Don't burden NFS server with caching these random builds.\n environment.set_value('CACHE_STORE', False)\n\n # Verify that we do crash in the min revision. 
This is assumed to be true\n # while we are doing the bisect.\n result = _testcase_reproduces_in_revision(testcase, testcase_file_path,\n job_type, min_revision)\n if result and not result.is_crash():\n testcase = data_handler.get_testcase_by_id(testcase_id)\n\n # Retry once on another bot to confirm our result.\n if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):\n tasks.add_task('progression', testcase_id, job_type)\n error_message = (\n 'Known crash revision %d did not crash, will retry on another bot to '\n 'confirm result' % known_crash_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n _update_completion_metadata(testcase, max_revision)\n return\n\n _clear_progression_pending(testcase)\n error_message = (\n 'Known crash revision %d did not crash' % known_crash_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n task_creation.mark_unreproducible_if_flaky(testcase, True)\n return\n\n # Start a binary search to find last non-crashing revision. At this point, we\n # know that we do crash in the min_revision, and do not crash in max_revision.\n while time.time() < deadline:\n min_revision = revision_list[min_index]\n max_revision = revision_list[max_index]\n\n # If the min and max revisions are one apart this is as much as we can\n # narrow the range.\n if max_index - min_index == 1:\n _save_fixed_range(testcase_id, min_revision, max_revision)\n return\n\n # Test the middle revision of our range.\n middle_index = (min_index + max_index) // 2\n middle_revision = revision_list[middle_index]\n\n testcase = data_handler.get_testcase_by_id(testcase_id)\n log_message = 'Testing r%d (current range %d:%d)' % (\n middle_revision, min_revision, max_revision)\n data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,\n log_message)\n\n try:\n result = _testcase_reproduces_in_revision(testcase, testcase_file_path,\n job_type, middle_revision)\n except errors.BadBuildError:\n # Skip this revision.\n del revision_list[middle_index]\n max_index -= 1\n continue\n\n if result.is_crash():\n min_index = middle_index\n else:\n max_index = middle_index\n\n _save_current_fixed_range_indices(testcase_id, revision_list[min_index],\n revision_list[max_index])\n\n # If we've broken out of the loop, we've exceeded the deadline. Recreate the\n # task to pick up where we left off.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = ('Timed out, current range r%d:r%d' %\n (revision_list[min_index], revision_list[max_index]))\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n tasks.add_task('progression', testcase_id, job_type)\n\n\ndef execute_task(testcase_id, job_type):\n \"\"\"Execute progression task.\"\"\"\n try:\n find_fixed_range(testcase_id, job_type)\n except errors.BuildSetupError as error:\n # If we failed to setup a build, it is likely a bot error. We can retry\n # the task in this case.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = 'Build setup failed r%d' % error.revision\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n build_fail_wait = environment.get_value('FAIL_WAIT')\n tasks.add_task(\n 'progression', testcase_id, job_type, wait_time=build_fail_wait)\n except errors.BadBuildError:\n # Though bad builds when narrowing the range are recoverable, certain builds\n # being marked as bad may be unrecoverable. 
Recoverable ones should not\n # reach this point.\n testcase = data_handler.get_testcase_by_id(testcase_id)\n error_message = 'Unable to recover from bad build'\n data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,\n error_message)\n", "path": "src/python/bot/tasks/progression_task.py" } ]
diff --git a/src/python/bot/tasks/progression_task.py b/src/python/bot/tasks/progression_task.py
index ad61f2d903..0a93c13b37 100644
--- a/src/python/bot/tasks/progression_task.py
+++ b/src/python/bot/tasks/progression_task.py
@@ -140,6 +140,10 @@ def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
 
 def _update_issue_metadata(testcase):
   """Update issue metadata."""
+  if environment.is_trusted_host():
+    # Not applicable.
+    return
+
   fuzz_target = testcase.get_fuzz_target()
   if not fuzz_target:
     return
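The progression task shown in this record narrows down where a crash stopped reproducing by bisecting the build revision list: the loop keeps the invariant that the testcase still crashes at `min_index` and no longer crashes at `max_index`, and shrinks the window until the two indices are adjacent. Below is a minimal, self-contained sketch of that core loop; the `reproduces` callback is a hypothetical stand-in for `_testcase_reproduces_in_revision`, and the bad-build skipping, range checkpointing, and deadline handling of the real task are deliberately omitted.

```python
from typing import Callable, List, Tuple


def bisect_fixed_range(revisions: List[int],
                       reproduces: Callable[[int], bool]) -> Tuple[int, int]:
    """Return (last_crashing_revision, first_non_crashing_revision).

    Assumes reproduces(revisions[0]) is True and reproduces(revisions[-1])
    is False, mirroring the checks the task performs before entering its loop.
    """
    lo, hi = 0, len(revisions) - 1
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if reproduces(revisions[mid]):
            lo = mid   # still crashes: the fix landed further to the right
        else:
            hi = mid   # no longer crashes: the fix landed at or before mid
    return revisions[lo], revisions[hi]


# Example: pretend the crash stopped reproducing after r1030.
revs = [1000, 1010, 1020, 1030, 1040, 1050]
print(bisect_fixed_range(revs, lambda r: r <= 1030))  # -> (1030, 1040)
```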
AUTOMATIC1111__stable-diffusion-webui-1326
New samplers are not showing up
I just updated my version to try out the new samplers, but they are not showing up. I deleted repositories/k-diffusion as a test, but they still don't show up. Someone on reddit mentioned to do `source venv/bin/activate` and then `pip uninstall k-diffusion`, but I have no idea what that means. How can I get the new samplers to show up in the UI?

Edit: They don't show up in the img2img tab.
[ { "content": "from collections import namedtuple\r\nimport numpy as np\r\nimport torch\r\nimport tqdm\r\nfrom PIL import Image\r\nimport inspect\r\n\r\nimport k_diffusion.sampling\r\nimport ldm.models.diffusion.ddim\r\nimport ldm.models.diffusion.plms\r\nfrom modules import prompt_parser\r\n\r\nfrom modules.shared import opts, cmd_opts, state\r\nimport modules.shared as shared\r\n\r\n\r\nSamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases'])\r\n\r\nsamplers_k_diffusion = [\r\n ('Euler a', 'sample_euler_ancestral', ['k_euler_a']),\r\n ('Euler', 'sample_euler', ['k_euler']),\r\n ('LMS', 'sample_lms', ['k_lms']),\r\n ('Heun', 'sample_heun', ['k_heun']),\r\n ('DPM2', 'sample_dpm_2', ['k_dpm_2']),\r\n ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']),\r\n ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']),\r\n ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),\r\n]\r\n\r\nsamplers_data_k_diffusion = [\r\n SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)\r\n for label, funcname, aliases in samplers_k_diffusion\r\n if hasattr(k_diffusion.sampling, funcname)\r\n]\r\n\r\nsamplers = [\r\n *samplers_data_k_diffusion,\r\n SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),\r\n SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),\r\n]\r\nsamplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]\r\n\r\nsampler_extra_params = {\r\n 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n}\r\n\r\ndef setup_img2img_steps(p, steps=None):\r\n if opts.img2img_fix_steps or steps is not None:\r\n steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0\r\n t_enc = p.steps - 1\r\n else:\r\n steps = p.steps\r\n t_enc = int(min(p.denoising_strength, 0.999) * steps)\r\n\r\n return steps, t_enc\r\n\r\n\r\ndef sample_to_image(samples):\r\n x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]\r\n x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)\r\n x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2)\r\n x_sample = x_sample.astype(np.uint8)\r\n return Image.fromarray(x_sample)\r\n\r\n\r\ndef store_latent(decoded):\r\n state.current_latent = decoded\r\n\r\n if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:\r\n if not shared.parallel_processing_allowed:\r\n shared.state.current_image = sample_to_image(decoded)\r\n\r\n\r\n\r\ndef extended_tdqm(sequence, *args, desc=None, **kwargs):\r\n state.sampling_steps = len(sequence)\r\n state.sampling_step = 0\r\n\r\n for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs):\r\n if state.interrupted:\r\n break\r\n\r\n yield x\r\n\r\n state.sampling_step += 1\r\n shared.total_tqdm.update()\r\n\r\n\r\nldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)\r\nldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)\r\n\r\n\r\nclass VanillaStableDiffusionSampler:\r\n def __init__(self, constructor, sd_model):\r\n self.sampler = constructor(sd_model)\r\n self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms\r\n self.mask = None\r\n self.nmask = None\r\n self.init_latent = None\r\n self.sampler_noises = None\r\n self.step = 0\r\n self.eta = None\r\n self.default_eta = 0.0\r\n\r\n def number_of_needed_noises(self, p):\r\n return 0\r\n\r\n def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):\r\n cond = prompt_parser.reconstruct_cond_batch(cond, self.step)\r\n unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)\r\n\r\n if self.mask is not None:\r\n img_orig = self.sampler.model.q_sample(self.init_latent, ts)\r\n x_dec = img_orig * self.mask + self.nmask * x_dec\r\n\r\n res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)\r\n\r\n if self.mask is not None:\r\n store_latent(self.init_latent * self.mask + self.nmask * res[1])\r\n else:\r\n store_latent(res[1])\r\n\r\n self.step += 1\r\n return res\r\n\r\n def initialize(self, p):\r\n self.eta = p.eta or opts.eta_ddim\r\n\r\n for fieldname in ['p_sample_ddim', 'p_sample_plms']:\r\n if hasattr(self.sampler, fieldname):\r\n setattr(self.sampler, fieldname, self.p_sample_ddim_hook)\r\n\r\n self.mask = p.mask if hasattr(p, 'mask') else None\r\n self.nmask = p.nmask if hasattr(p, 'nmask') else None\r\n\r\n def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):\r\n steps, t_enc = setup_img2img_steps(p, steps)\r\n\r\n self.initialize(p)\r\n\r\n # existing code fails with cetain step counts, like 9\r\n try:\r\n self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)\r\n except Exception:\r\n self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)\r\n\r\n x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)\r\n\r\n self.init_latent = x\r\n self.step = 0\r\n\r\n samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)\r\n\r\n return samples\r\n\r\n def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):\r\n 
self.initialize(p)\r\n\r\n self.init_latent = None\r\n self.step = 0\r\n\r\n steps = steps or p.steps\r\n\r\n # existing code fails with cetin step counts, like 9\r\n try:\r\n samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)\r\n except Exception:\r\n samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)\r\n\r\n return samples_ddim\r\n\r\n\r\nclass CFGDenoiser(torch.nn.Module):\r\n def __init__(self, model):\r\n super().__init__()\r\n self.inner_model = model\r\n self.mask = None\r\n self.nmask = None\r\n self.init_latent = None\r\n self.step = 0\r\n\r\n def forward(self, x, sigma, uncond, cond, cond_scale):\r\n cond = prompt_parser.reconstruct_cond_batch(cond, self.step)\r\n uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)\r\n\r\n if shared.batch_cond_uncond:\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigma] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)\r\n denoised = uncond + (cond - uncond) * cond_scale\r\n else:\r\n uncond = self.inner_model(x, sigma, cond=uncond)\r\n cond = self.inner_model(x, sigma, cond=cond)\r\n denoised = uncond + (cond - uncond) * cond_scale\r\n\r\n if self.mask is not None:\r\n denoised = self.init_latent * self.mask + self.nmask * denoised\r\n\r\n self.step += 1\r\n\r\n return denoised\r\n\r\n\r\ndef extended_trange(sampler, count, *args, **kwargs):\r\n state.sampling_steps = count\r\n state.sampling_step = 0\r\n\r\n for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs):\r\n if state.interrupted:\r\n break\r\n\r\n if sampler.stop_at is not None and x > sampler.stop_at:\r\n break\r\n\r\n yield x\r\n\r\n state.sampling_step += 1\r\n shared.total_tqdm.update()\r\n\r\n\r\nclass TorchHijack:\r\n def __init__(self, kdiff_sampler):\r\n self.kdiff_sampler = kdiff_sampler\r\n\r\n def __getattr__(self, item):\r\n if item == 'randn_like':\r\n return self.kdiff_sampler.randn_like\r\n\r\n if hasattr(torch, item):\r\n return getattr(torch, item)\r\n\r\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(type(self).__name__, item))\r\n\r\n\r\nclass KDiffusionSampler:\r\n def __init__(self, funcname, sd_model):\r\n self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)\r\n self.funcname = funcname\r\n self.func = getattr(k_diffusion.sampling, self.funcname)\r\n self.extra_params = sampler_extra_params.get(funcname, [])\r\n self.model_wrap_cfg = CFGDenoiser(self.model_wrap)\r\n self.sampler_noises = None\r\n self.sampler_noise_index = 0\r\n self.stop_at = None\r\n self.eta = None\r\n self.default_eta = 1.0\r\n\r\n def callback_state(self, d):\r\n store_latent(d[\"denoised\"])\r\n\r\n def number_of_needed_noises(self, p):\r\n return p.steps\r\n\r\n def randn_like(self, x):\r\n noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None\r\n\r\n if noise is not None and x.shape == noise.shape:\r\n res = noise\r\n else:\r\n res = torch.randn_like(x)\r\n\r\n self.sampler_noise_index += 
1\r\n return res\r\n\r\n def initialize(self, p):\r\n self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None\r\n self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None\r\n self.model_wrap.step = 0\r\n self.sampler_noise_index = 0\r\n self.eta = p.eta or opts.eta_ancestral\r\n\r\n if hasattr(k_diffusion.sampling, 'trange'):\r\n k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)\r\n\r\n if self.sampler_noises is not None:\r\n k_diffusion.sampling.torch = TorchHijack(self)\r\n\r\n extra_params_kwargs = {}\r\n for param_name in self.extra_params:\r\n if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:\r\n extra_params_kwargs[param_name] = getattr(p, param_name)\r\n\r\n if 'eta' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['eta'] = self.eta\r\n\r\n return extra_params_kwargs\r\n\r\n def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):\r\n steps, t_enc = setup_img2img_steps(p, steps)\r\n\r\n sigmas = self.model_wrap.get_sigmas(steps)\r\n\r\n noise = noise * sigmas[steps - t_enc - 1]\r\n xi = x + noise\r\n\r\n extra_params_kwargs = self.initialize(p)\r\n\r\n sigma_sched = sigmas[steps - t_enc - 1:]\r\n\r\n self.model_wrap_cfg.init_latent = x\r\n\r\n return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)\r\n\r\n def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):\r\n steps = steps or p.steps\r\n\r\n sigmas = self.model_wrap.get_sigmas(steps)\r\n x = x * sigmas[0]\r\n\r\n extra_params_kwargs = self.initialize(p)\r\n if 'sigma_min' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()\r\n extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()\r\n if 'n' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['n'] = steps\r\n else:\r\n extra_params_kwargs['sigmas'] = sigmas\r\n samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)\r\n return samples\r\n\r\n", "path": "modules/sd_samplers.py" } ]
[ { "content": "from collections import namedtuple\r\nimport numpy as np\r\nimport torch\r\nimport tqdm\r\nfrom PIL import Image\r\nimport inspect\r\nfrom modules.paths import paths\r\nsys.path.insert(0, paths[\"k_diffusion\"])\r\nimport k_diffusion.sampling\r\nimport ldm.models.diffusion.ddim\r\nimport ldm.models.diffusion.plms\r\nfrom modules import prompt_parser\r\n\r\nfrom modules.shared import opts, cmd_opts, state\r\nimport modules.shared as shared\r\n\r\n\r\nSamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases'])\r\n\r\nsamplers_k_diffusion = [\r\n ('Euler a', 'sample_euler_ancestral', ['k_euler_a']),\r\n ('Euler', 'sample_euler', ['k_euler']),\r\n ('LMS', 'sample_lms', ['k_lms']),\r\n ('Heun', 'sample_heun', ['k_heun']),\r\n ('DPM2', 'sample_dpm_2', ['k_dpm_2']),\r\n ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']),\r\n ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']),\r\n ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),\r\n]\r\n\r\nsamplers_data_k_diffusion = [\r\n SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)\r\n for label, funcname, aliases in samplers_k_diffusion\r\n if hasattr(k_diffusion.sampling, funcname)\r\n]\r\n\r\nsamplers = [\r\n *samplers_data_k_diffusion,\r\n SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),\r\n SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),\r\n]\r\nsamplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]\r\n\r\nsampler_extra_params = {\r\n 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],\r\n}\r\n\r\ndef setup_img2img_steps(p, steps=None):\r\n if opts.img2img_fix_steps or steps is not None:\r\n steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0\r\n t_enc = p.steps - 1\r\n else:\r\n steps = p.steps\r\n t_enc = int(min(p.denoising_strength, 0.999) * steps)\r\n\r\n return steps, t_enc\r\n\r\n\r\ndef sample_to_image(samples):\r\n x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]\r\n x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)\r\n x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2)\r\n x_sample = x_sample.astype(np.uint8)\r\n return Image.fromarray(x_sample)\r\n\r\n\r\ndef store_latent(decoded):\r\n state.current_latent = decoded\r\n\r\n if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:\r\n if not shared.parallel_processing_allowed:\r\n shared.state.current_image = sample_to_image(decoded)\r\n\r\n\r\n\r\ndef extended_tdqm(sequence, *args, desc=None, **kwargs):\r\n state.sampling_steps = len(sequence)\r\n state.sampling_step = 0\r\n\r\n for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs):\r\n if state.interrupted:\r\n break\r\n\r\n yield x\r\n\r\n state.sampling_step += 1\r\n shared.total_tqdm.update()\r\n\r\n\r\nldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)\r\nldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)\r\n\r\n\r\nclass VanillaStableDiffusionSampler:\r\n def __init__(self, constructor, sd_model):\r\n self.sampler = constructor(sd_model)\r\n self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms\r\n self.mask = None\r\n self.nmask = None\r\n self.init_latent = None\r\n self.sampler_noises = None\r\n self.step = 0\r\n self.eta = None\r\n self.default_eta = 0.0\r\n\r\n def number_of_needed_noises(self, p):\r\n return 0\r\n\r\n def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):\r\n cond = prompt_parser.reconstruct_cond_batch(cond, self.step)\r\n unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)\r\n\r\n if self.mask is not None:\r\n img_orig = self.sampler.model.q_sample(self.init_latent, ts)\r\n x_dec = img_orig * self.mask + self.nmask * x_dec\r\n\r\n res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)\r\n\r\n if self.mask is not None:\r\n store_latent(self.init_latent * self.mask + self.nmask * res[1])\r\n else:\r\n store_latent(res[1])\r\n\r\n self.step += 1\r\n return res\r\n\r\n def initialize(self, p):\r\n self.eta = p.eta or opts.eta_ddim\r\n\r\n for fieldname in ['p_sample_ddim', 'p_sample_plms']:\r\n if hasattr(self.sampler, fieldname):\r\n setattr(self.sampler, fieldname, self.p_sample_ddim_hook)\r\n\r\n self.mask = p.mask if hasattr(p, 'mask') else None\r\n self.nmask = p.nmask if hasattr(p, 'nmask') else None\r\n\r\n def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):\r\n steps, t_enc = setup_img2img_steps(p, steps)\r\n\r\n self.initialize(p)\r\n\r\n # existing code fails with cetain step counts, like 9\r\n try:\r\n self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)\r\n except Exception:\r\n self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)\r\n\r\n x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)\r\n\r\n self.init_latent = x\r\n self.step = 0\r\n\r\n samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)\r\n\r\n return samples\r\n\r\n def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):\r\n 
self.initialize(p)\r\n\r\n self.init_latent = None\r\n self.step = 0\r\n\r\n steps = steps or p.steps\r\n\r\n # existing code fails with cetin step counts, like 9\r\n try:\r\n samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)\r\n except Exception:\r\n samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)\r\n\r\n return samples_ddim\r\n\r\n\r\nclass CFGDenoiser(torch.nn.Module):\r\n def __init__(self, model):\r\n super().__init__()\r\n self.inner_model = model\r\n self.mask = None\r\n self.nmask = None\r\n self.init_latent = None\r\n self.step = 0\r\n\r\n def forward(self, x, sigma, uncond, cond, cond_scale):\r\n cond = prompt_parser.reconstruct_cond_batch(cond, self.step)\r\n uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)\r\n\r\n if shared.batch_cond_uncond:\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigma] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)\r\n denoised = uncond + (cond - uncond) * cond_scale\r\n else:\r\n uncond = self.inner_model(x, sigma, cond=uncond)\r\n cond = self.inner_model(x, sigma, cond=cond)\r\n denoised = uncond + (cond - uncond) * cond_scale\r\n\r\n if self.mask is not None:\r\n denoised = self.init_latent * self.mask + self.nmask * denoised\r\n\r\n self.step += 1\r\n\r\n return denoised\r\n\r\n\r\ndef extended_trange(sampler, count, *args, **kwargs):\r\n state.sampling_steps = count\r\n state.sampling_step = 0\r\n\r\n for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs):\r\n if state.interrupted:\r\n break\r\n\r\n if sampler.stop_at is not None and x > sampler.stop_at:\r\n break\r\n\r\n yield x\r\n\r\n state.sampling_step += 1\r\n shared.total_tqdm.update()\r\n\r\n\r\nclass TorchHijack:\r\n def __init__(self, kdiff_sampler):\r\n self.kdiff_sampler = kdiff_sampler\r\n\r\n def __getattr__(self, item):\r\n if item == 'randn_like':\r\n return self.kdiff_sampler.randn_like\r\n\r\n if hasattr(torch, item):\r\n return getattr(torch, item)\r\n\r\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(type(self).__name__, item))\r\n\r\n\r\nclass KDiffusionSampler:\r\n def __init__(self, funcname, sd_model):\r\n self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)\r\n self.funcname = funcname\r\n self.func = getattr(k_diffusion.sampling, self.funcname)\r\n self.extra_params = sampler_extra_params.get(funcname, [])\r\n self.model_wrap_cfg = CFGDenoiser(self.model_wrap)\r\n self.sampler_noises = None\r\n self.sampler_noise_index = 0\r\n self.stop_at = None\r\n self.eta = None\r\n self.default_eta = 1.0\r\n\r\n def callback_state(self, d):\r\n store_latent(d[\"denoised\"])\r\n\r\n def number_of_needed_noises(self, p):\r\n return p.steps\r\n\r\n def randn_like(self, x):\r\n noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None\r\n\r\n if noise is not None and x.shape == noise.shape:\r\n res = noise\r\n else:\r\n res = torch.randn_like(x)\r\n\r\n self.sampler_noise_index += 
1\r\n return res\r\n\r\n def initialize(self, p):\r\n self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None\r\n self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None\r\n self.model_wrap.step = 0\r\n self.sampler_noise_index = 0\r\n self.eta = p.eta or opts.eta_ancestral\r\n\r\n if hasattr(k_diffusion.sampling, 'trange'):\r\n k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)\r\n\r\n if self.sampler_noises is not None:\r\n k_diffusion.sampling.torch = TorchHijack(self)\r\n\r\n extra_params_kwargs = {}\r\n for param_name in self.extra_params:\r\n if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:\r\n extra_params_kwargs[param_name] = getattr(p, param_name)\r\n\r\n if 'eta' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['eta'] = self.eta\r\n\r\n return extra_params_kwargs\r\n\r\n def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):\r\n steps, t_enc = setup_img2img_steps(p, steps)\r\n\r\n sigmas = self.model_wrap.get_sigmas(steps)\r\n\r\n noise = noise * sigmas[steps - t_enc - 1]\r\n xi = x + noise\r\n\r\n extra_params_kwargs = self.initialize(p)\r\n\r\n sigma_sched = sigmas[steps - t_enc - 1:]\r\n\r\n self.model_wrap_cfg.init_latent = x\r\n\r\n return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)\r\n\r\n def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):\r\n steps = steps or p.steps\r\n\r\n sigmas = self.model_wrap.get_sigmas(steps)\r\n x = x * sigmas[0]\r\n\r\n extra_params_kwargs = self.initialize(p)\r\n if 'sigma_min' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()\r\n extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()\r\n if 'n' in inspect.signature(self.func).parameters:\r\n extra_params_kwargs['n'] = steps\r\n else:\r\n extra_params_kwargs['sigmas'] = sigmas\r\n samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)\r\n return samples\r\n\r\n", "path": "modules/sd_samplers.py" } ]
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 5642b870cdc..5e60e494b90 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -4,7 +4,8 @@
 import tqdm
 from PIL import Image
 import inspect
-
+from modules.paths import paths
+sys.path.insert(0, paths["k_diffusion"])
 import k_diffusion.sampling
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms
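The sampler list in `sd_samplers.py` above is filtered with `hasattr(k_diffusion.sampling, funcname)`, so when `import k_diffusion` resolves to an older copy (for example one pip-installed into the venv) instead of the checkout under `repositories/k-diffusion`, the new `sample_dpm_fast` / `sample_dpm_adaptive` entries are silently dropped, which matches the symptom in the issue. The diff prepends the bundled k-diffusion directory to `sys.path` so the repository copy wins over site-packages. A minimal sketch of that pattern follows; the hard-coded path is a hypothetical stand-in for whatever `modules.paths` actually resolves.

```python
import sys

# Hypothetical path to the bundled checkout; the real code resolves this
# through modules.paths rather than hard-coding it.
K_DIFFUSION_DIR = "repositories/k-diffusion"

# Prepending (rather than appending) makes this copy shadow any k-diffusion
# already installed into the environment's site-packages.
sys.path.insert(0, K_DIFFUSION_DIR)

import k_diffusion.sampling  # noqa: E402  (import after the sys.path tweak)

# Quick way to check which copy was imported and whether it has the new samplers:
print(k_diffusion.__file__)
for name in ("sample_dpm_fast", "sample_dpm_adaptive"):
    print(name, hasattr(k_diffusion.sampling, name))
```

If the printed path still points into site-packages, an older pip-installed k-diffusion is shadowing the checkout, which is what the `pip uninstall k-diffusion` advice quoted in the issue is meant to address.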
jazzband__pip-tools-488
Providing a source file which does not have an extension writes to a .txt file in the current dir
If you happen to use an extensionless filename as a source of requirements, pip-compile will deduce the wrong output filename and generate a ".txt" file (a relative path, and thus in the current working dir).

##### Environment Versions

1. OS Type: should be cross-platform, but observed on Windows
1. Python version: `$ python -V`: 2.7.8
1. pip version: `$ pip --version`: 9.0.1
1. pip-tools version: `$ pip-compile --version`: 1.8.1rc2

##### Steps to replicate

1. `echo "request" > no_extension`
2. `pip-compile no_extension`

##### Expected result

We should error out, because it is difficult to deduce a new name if there is no extension to remove.

##### Actual result

A .txt file is generated in the current directory.
[ { "content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name, _, _ = src_files[0].rpartition('.')\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Pre-parse the inline package upgrade specs: they should take precedence\n # over the stuff in the requirements files\n upgrade_packages = [InstallRequirement.from_line(pkg)\n for pkg in upgrade_packages]\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', 
default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py" } ]
[ { "content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name = src_files[0].rsplit('.', 1)[0]\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Pre-parse the inline package upgrade specs: they should take precedence\n # over the stuff in the requirements files\n upgrade_packages = [InstallRequirement.from_line(pkg)\n for pkg in upgrade_packages]\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', 
default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 902b16430..8522af6f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # 1.9.0 (Unreleased) +- Fixed the default output file name when the source file has no extension ([#470](https://github.com/jazzband/pip-tools/issues/470)) - Added a `--max-rounds` argument to the pip-compile command to allow for solving large requirement sets ([#472](https://github.com/jazzband/pip-tools/pull/472)) - Exclude unsafe packages' dependencies when `--allow-unsafe` is not in use (#445) - Exclude irrelevant pip constraints ([#471](https://github.com/jazzband/pip-tools/pull/471)) diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py index d88a72963..850e4a503 100755 --- a/piptools/scripts/compile.py +++ b/piptools/scripts/compile.py @@ -92,7 +92,7 @@ def cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url, if output_file: dst_file = output_file else: - base_name, _, _ = src_files[0].rpartition('.') + base_name = src_files[0].rsplit('.', 1)[0] dst_file = base_name + '.txt' if upgrade and upgrade_packages: diff --git a/tests/test_cli.py b/tests/test_cli.py index 36178fe71..0976ecfda 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -219,3 +219,21 @@ def test_editable_package(tmpdir): assert out.exit_code == 0 assert fake_package_dir in out.output assert 'six==1.10.0' in out.output + + +def test_input_file_without_extension(tmpdir): + """ + piptools can compile a file without an extension, + and add .txt as the defaut output file extension. + """ + runner = CliRunner() + with runner.isolated_filesystem(): + with open('requirements', 'w') as req_in: + req_in.write('six==1.10.0') + + out = runner.invoke(cli, ['-n', 'requirements']) + + print(out.output) + assert out.exit_code == 0 + assert '--output-file requirements.txt' in out.output + assert 'six==1.10.0' in out.output
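The patch above replaces `str.rpartition` with `str.rsplit` when deriving the default output file name. The sketch below is not part of pip-tools; it is a standalone illustration of why the old expression collapsed an extension-less input such as `requirements` to an empty base name, while `rsplit('.', 1)[0]` leaves it intact.

```python
# Standalone illustration of the one-line fix in piptools/scripts/compile.py:
# deriving the default output name from an input file that has no extension.

def base_name_old(src_file: str) -> str:
    # rpartition('.') returns ('', '', 'requirements') when there is no dot,
    # so the base name collapses to '' and the output file becomes '.txt'.
    base_name, _, _ = src_file.rpartition('.')
    return base_name

def base_name_new(src_file: str) -> str:
    # rsplit('.', 1)[0] returns the whole string unchanged when there is no dot.
    return src_file.rsplit('.', 1)[0]

assert base_name_old('requirements.in') == 'requirements'
assert base_name_new('requirements.in') == 'requirements'

assert base_name_old('requirements') == ''              # broken: output was '.txt'
assert base_name_new('requirements') == 'requirements'  # fixed: 'requirements.txt'
```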
microsoft__playwright-python-145
DEBUG outputs won't get forwarded
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to 
complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete 
installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py" } ]
diff --git a/playwright/main.py b/playwright/main.py index 6a8451c0a..4726ef4bf 100644 --- a/playwright/main.py +++ b/playwright/main.py @@ -49,7 +49,7 @@ async def run_driver_async() -> Connection: str(driver_executable), stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, + stderr=sys.stderr, limit=32768, ) assert proc.stdout
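The fix is a single argument change: the driver's stderr is handed the parent's `sys.stderr` instead of an unread `PIPE`, so DEBUG output from the driver process reaches the console. The snippet below is a minimal, Playwright-independent sketch of that difference; the child command and message are made up for illustration.

```python
# Minimal sketch of why DEBUG output was lost: with stderr=PIPE the child's
# stderr sits in an unread pipe, while stderr=sys.stderr hands the child our
# own stderr file descriptor so its output is forwarded as it is produced.
import asyncio
import sys

async def run_child(forward_stderr: bool) -> None:
    proc = await asyncio.create_subprocess_exec(
        sys.executable,
        "-c",
        "import sys; print('DEBUG from child', file=sys.stderr)",
        stdout=asyncio.subprocess.PIPE,
        stderr=sys.stderr if forward_stderr else asyncio.subprocess.PIPE,
    )
    await proc.wait()

# Forwarded: the child's message appears on our stderr immediately.
asyncio.run(run_child(forward_stderr=True))
```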
urllib3__urllib3-2424
Remove integration tests for Botocore with Python 2.7. Botocore dropped support for Python 2.7 in July so we don't have to do integration testing with Botocore+Python 2.7 on the 1.26.x branch any longer. Reference: https://github.com/urllib3/urllib3/pull/2422
[ { "content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(\".[{extras}]\".format(extras=extras))\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session):\n tests_impl(session)\n\n\[email protected](python=[\"2\", \"3\"])\ndef google_brotli(session):\n # https://pypi.org/project/Brotli/ is the Google version of brotli, so\n # install it separately and don't install our brotli extra (which installs\n # brotlipy).\n session.install(\"brotli\")\n tests_impl(session, extras=\"socks,secure\")\n\n\[email protected](python=\"2.7\")\ndef app_engine(session):\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(\".\")\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"sx\",\n \"test/appengine\",\n *session.posargs,\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\ndef git_clone(session, git_url):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected](python=[\"2.7\", \"3.9\"])\ndef downstream_botocore(session):\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected](python=[\"2.7\", \"3.9\"])\ndef downstream_requests(session):\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"pytest\", \"tests\")\n\n\[email 
protected]()\ndef format(session):\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n\[email protected]\ndef docs(session):\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
[ { "content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(\".[{extras}]\".format(extras=extras))\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session):\n tests_impl(session)\n\n\[email protected](python=[\"2\", \"3\"])\ndef google_brotli(session):\n # https://pypi.org/project/Brotli/ is the Google version of brotli, so\n # install it separately and don't install our brotli extra (which installs\n # brotlipy).\n session.install(\"brotli\")\n tests_impl(session, extras=\"socks,secure\")\n\n\[email protected](python=\"2.7\")\ndef app_engine(session):\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(\".\")\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"sx\",\n \"test/appengine\",\n *session.posargs,\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\ndef git_clone(session, git_url):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected](python=[\"3.9\"])\ndef downstream_botocore(session):\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected](python=[\"2.7\", \"3.9\"])\ndef downstream_requests(session):\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef 
format(session):\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n\[email protected]\ndef docs(session):\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index e421d8f3af..94c447d36f 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -12,6 +12,11 @@ jobs: matrix: python-version: [2.7, 3.9] downstream: [botocore, requests] + exclude: + # excludes botocore in Python 2.7 + - python-version: 2.7 + downstream: botocore + runs-on: ubuntu-18.04 steps: diff --git a/noxfile.py b/noxfile.py index f317e3b97e..7b27a97cd1 100644 --- a/noxfile.py +++ b/noxfile.py @@ -88,7 +88,7 @@ def git_clone(session, git_url): session.run("git", "clone", "--depth", "1", git_url, external=True) [email protected](python=["2.7", "3.9"]) [email protected](python=["3.9"]) def downstream_botocore(session): root = os.getcwd() tmp_dir = session.create_tmp()
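On the nox side the change amounts to dropping `"2.7"` from the `python` argument of the session decorator, which is what decides which interpreters nox builds environments for; the workflow `exclude` entry handles the CI matrix. A condensed sketch of the resulting session (not the full noxfile) is shown below.

```python
# Condensed sketch of the botocore downstream session after the change:
# the `python` list on @nox.session controls which interpreter versions the
# session runs under, so listing only "3.9" drops the Python 2.7 run.
import nox

@nox.session(python=["3.9"])
def downstream_botocore(session):
    session.install(".", silent=False)
    session.run("python", "scripts/ci/run-tests")
```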
flairNLP__flair-300
Update torch version to 1.0.0. torch version 1.0.0 is available. Use torch version 1.0.0 in Flair.
[ { "content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.2',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.26.0',\n 'segtok==1.5.7',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'sklearn',\n 'sqlitedict==1.6.0',\n 'deprecated==1.2.4',\n 'hyperopt==0.1.1',\n 'pytorch-pretrained-bert==0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.2',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==1.0.0',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.26.0',\n 'segtok==1.5.7',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'sklearn',\n 'sqlitedict==1.6.0',\n 'deprecated==1.2.4',\n 'hyperopt==0.1.1',\n 'pytorch-pretrained-bert==0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py" } ]
diff --git a/requirements.txt b/requirements.txt index 71da5a8102..e1aef4f572 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -torch==0.4.1 +torch==1.0.0 gensim==3.4.0 typing==3.6.4 pytest==3.6.4 diff --git a/setup.py b/setup.py index 90749487f3..4234e19ade 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ packages=find_packages(exclude='test'), # same as name license='MIT', install_requires=[ - 'torch==0.4.1', + 'torch==1.0.0', 'gensim==3.4.0', 'typing==3.6.4', 'tqdm==4.26.0',
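Since both `requirements.txt` and `setup.py` pin torch exactly, a quick way to confirm that an environment picked up the bump is to check the installed version at runtime; this check is just an illustration and is not part of the flair codebase.

```python
# Illustrative check (not part of flair): confirm the environment resolved
# the new exact pin rather than a leftover 0.4.1 install.
import torch

assert torch.__version__ == "1.0.0", f"unexpected torch version: {torch.__version__}"
print("torch", torch.__version__)
```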
Kaggle__docker-python-1326
NameError: name 'io' is not defined
## 🐛 Bug

I am trying to run my scripts on a GPU notebook, and I keep getting the following error.

```shell
Traceback (most recent call last):
  File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 1172, in init
    getcaller()
  File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 846, in getcaller
    src, line, func, stack = logger.findCaller(stack_info=True)
  File "/root/.local/lib/python3.10/site-packages/log.py", line 42, in findCaller
    sio = io.StringIO()
NameError: name 'io' is not defined
```

In addition, I found that there is no `import io` statement in [this](https://github.com/Kaggle/docker-python/blob/main/patches/log.py) code. Can you take a look?
[ { "content": "import logging\nimport os\n\nimport google.auth\n\n\n_LOG_TO_FILE_ENV = os.getenv(\"KAGGLE_LOG_TO_FILE\")\n\n\nclass _LogFormatter(logging.Formatter):\n \"\"\"A logging formatter which truncates long messages.\"\"\"\n\n _MAX_LOG_LENGTH = 10000 # Be generous, not to truncate long backtraces.\n\n def format(self, record):\n msg = super(_LogFormatter, self).format(record)\n return msg[:_LogFormatter._MAX_LOG_LENGTH] if msg else msg\n\n# TODO(vimota): Clean this up once we're using python 3.8 and can use\n# (https://github.com/python/cpython/commit/dde9fdbe453925279ac3d2a6a72102f6f9ef247c)\n# Right now, making the logging module display the intended frame's information\n# when the logging calls (info, warn, ...) are wrapped (as is the case in our\n# Log class) involves fragile logic.\nclass _Logger(logging.Logger):\n\n # This is a copy of logging.Logger.findCaller with the filename ignore\n # set expanded to include the current filename (\".../log.py\").\n # Copyright 2001-2015 by Vinay Sajip. All Rights Reserved.\n # License: https://github.com/python/cpython/blob/ce9e62544571e7ade7186697d5dd065fb4c5243f/LICENSE\n def findCaller(self, stack_info=False, stacklevel=1):\n f = logging.currentframe()\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename in _ignore_srcfiles:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv\n\n\n_srcfile = os.path.normcase(_Logger.findCaller.__code__.co_filename)\n_ignore_srcfiles = (_srcfile, logging._srcfile)\n\nclass Log:\n \"\"\" Helper aggregate for all things related to logging activity. \"\"\"\n\n _GLOBAL_LOG = logging.getLogger(\"\")\n _initialized = False\n\n # These are convenience helpers. 
For performance, consider saving Log.get_logger() and using that\n @staticmethod\n def critical(msg, *args, **kwargs):\n Log._GLOBAL_LOG.critical(msg, *args, **kwargs)\n\n @staticmethod\n def fatal(msg, *args, **kwargs):\n Log._GLOBAL_LOG.fatal(msg, *args, **kwargs)\n\n @staticmethod\n def exception(msg, *args, **kwargs):\n Log._GLOBAL_LOG.exception(msg, *args, **kwargs)\n\n @staticmethod\n def error(msg, *args, **kwargs):\n Log._GLOBAL_LOG.error(msg, *args, **kwargs)\n\n @staticmethod\n def warn(msg, *args, **kwargs):\n Log._GLOBAL_LOG.warn(msg, *args, **kwargs)\n\n @staticmethod\n def warning(msg, *args, **kwargs):\n Log._GLOBAL_LOG.warning(msg, *args, **kwargs)\n\n @staticmethod\n def debug(msg, *args, **kwargs):\n Log._GLOBAL_LOG.debug(msg, *args, **kwargs)\n\n @staticmethod\n def info(msg, *args, **kwargs):\n Log._GLOBAL_LOG.info(msg, *args, **kwargs)\n\n @staticmethod\n def set_level(loglevel):\n if isinstance(loglevel, int):\n Log._GLOBAL_LOG.setLevel(loglevel)\n return\n elif isinstance(loglevel, str):\n # idea from https://docs.python.org/3.5/howto/logging.html#logging-to-a-file\n numeric_level = getattr(logging, loglevel.upper(), None)\n if isinstance(numeric_level, int):\n Log._GLOBAL_LOG.setLevel(numeric_level)\n return\n\n raise ValueError('Invalid log level: %s' % loglevel)\n\n @staticmethod\n def _static_init():\n if Log._initialized:\n return\n\n logging.setLoggerClass(_Logger)\n # The root logger's type is unfortunately (and surprisingly) not affected by\n # `setLoggerClass`. Monkey patch it instead. TODO(vimota): Remove this, see the TODO\n # associated with _Logger.\n logging.RootLogger.findCaller = _Logger.findCaller\n log_to_file = _LOG_TO_FILE_ENV.lower() in (\"yes\", \"true\", \"t\", \"1\") if _LOG_TO_FILE_ENV is not None else True\n if log_to_file:\n handler = logging.FileHandler(filename='/tmp/kaggle.log', mode='w')\n else:\n handler = logging.StreamHandler()\n \n # \".1s\" is for the first letter: http://stackoverflow.com/a/27453084/1869.\n format_string = \"%(asctime)s %(levelname).1s %(process)d %(filename)s:%(lineno)d] %(message)s\"\n handler.setFormatter(_LogFormatter(format_string))\n logging.basicConfig(level=logging.INFO, handlers=[handler])\n Log._initialized = True\n\nLog._static_init()", "path": "patches/log.py" } ]
[ { "content": "import io\nimport logging\nimport os\n\nimport google.auth\n\n\n_LOG_TO_FILE_ENV = os.getenv(\"KAGGLE_LOG_TO_FILE\")\n\n\nclass _LogFormatter(logging.Formatter):\n \"\"\"A logging formatter which truncates long messages.\"\"\"\n\n _MAX_LOG_LENGTH = 10000 # Be generous, not to truncate long backtraces.\n\n def format(self, record):\n msg = super(_LogFormatter, self).format(record)\n return msg[:_LogFormatter._MAX_LOG_LENGTH] if msg else msg\n\n# TODO(vimota): Clean this up once we're using python 3.8 and can use\n# (https://github.com/python/cpython/commit/dde9fdbe453925279ac3d2a6a72102f6f9ef247c)\n# Right now, making the logging module display the intended frame's information\n# when the logging calls (info, warn, ...) are wrapped (as is the case in our\n# Log class) involves fragile logic.\nclass _Logger(logging.Logger):\n\n # This is a copy of logging.Logger.findCaller with the filename ignore\n # set expanded to include the current filename (\".../log.py\").\n # Copyright 2001-2015 by Vinay Sajip. All Rights Reserved.\n # License: https://github.com/python/cpython/blob/ce9e62544571e7ade7186697d5dd065fb4c5243f/LICENSE\n def findCaller(self, stack_info=False, stacklevel=1):\n f = logging.currentframe()\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename in _ignore_srcfiles:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv\n\n\n_srcfile = os.path.normcase(_Logger.findCaller.__code__.co_filename)\n_ignore_srcfiles = (_srcfile, logging._srcfile)\n\nclass Log:\n \"\"\" Helper aggregate for all things related to logging activity. \"\"\"\n\n _GLOBAL_LOG = logging.getLogger(\"\")\n _initialized = False\n\n # These are convenience helpers. 
For performance, consider saving Log.get_logger() and using that\n @staticmethod\n def critical(msg, *args, **kwargs):\n Log._GLOBAL_LOG.critical(msg, *args, **kwargs)\n\n @staticmethod\n def fatal(msg, *args, **kwargs):\n Log._GLOBAL_LOG.fatal(msg, *args, **kwargs)\n\n @staticmethod\n def exception(msg, *args, **kwargs):\n Log._GLOBAL_LOG.exception(msg, *args, **kwargs)\n\n @staticmethod\n def error(msg, *args, **kwargs):\n Log._GLOBAL_LOG.error(msg, *args, **kwargs)\n\n @staticmethod\n def warn(msg, *args, **kwargs):\n Log._GLOBAL_LOG.warn(msg, *args, **kwargs)\n\n @staticmethod\n def warning(msg, *args, **kwargs):\n Log._GLOBAL_LOG.warning(msg, *args, **kwargs)\n\n @staticmethod\n def debug(msg, *args, **kwargs):\n Log._GLOBAL_LOG.debug(msg, *args, **kwargs)\n\n @staticmethod\n def info(msg, *args, **kwargs):\n Log._GLOBAL_LOG.info(msg, *args, **kwargs)\n\n @staticmethod\n def set_level(loglevel):\n if isinstance(loglevel, int):\n Log._GLOBAL_LOG.setLevel(loglevel)\n return\n elif isinstance(loglevel, str):\n # idea from https://docs.python.org/3.5/howto/logging.html#logging-to-a-file\n numeric_level = getattr(logging, loglevel.upper(), None)\n if isinstance(numeric_level, int):\n Log._GLOBAL_LOG.setLevel(numeric_level)\n return\n\n raise ValueError('Invalid log level: %s' % loglevel)\n\n @staticmethod\n def _static_init():\n if Log._initialized:\n return\n\n logging.setLoggerClass(_Logger)\n # The root logger's type is unfortunately (and surprisingly) not affected by\n # `setLoggerClass`. Monkey patch it instead. TODO(vimota): Remove this, see the TODO\n # associated with _Logger.\n logging.RootLogger.findCaller = _Logger.findCaller\n log_to_file = _LOG_TO_FILE_ENV.lower() in (\"yes\", \"true\", \"t\", \"1\") if _LOG_TO_FILE_ENV is not None else True\n if log_to_file:\n handler = logging.FileHandler(filename='/tmp/kaggle.log', mode='w')\n else:\n handler = logging.StreamHandler()\n \n # \".1s\" is for the first letter: http://stackoverflow.com/a/27453084/1869.\n format_string = \"%(asctime)s %(levelname).1s %(process)d %(filename)s:%(lineno)d] %(message)s\"\n handler.setFormatter(_LogFormatter(format_string))\n logging.basicConfig(level=logging.INFO, handlers=[handler])\n Log._initialized = True\n\nLog._static_init()\n", "path": "patches/log.py" } ]
diff --git a/patches/log.py b/patches/log.py index 2da5993c..59a07c8c 100644 --- a/patches/log.py +++ b/patches/log.py @@ -1,3 +1,4 @@ +import io import logging import os @@ -129,4 +130,4 @@ def _static_init(): logging.basicConfig(level=logging.INFO, handlers=[handler]) Log._initialized = True -Log._static_init() \ No newline at end of file +Log._static_init()
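The traceback in the report points at `findCaller`, which builds the stack info with `io.StringIO()` even though `patches/log.py` never imported `io`; the patch adds the missing top-level import. Below is a minimal, self-contained reproduction of that failure mode and the fix.

```python
# Minimal reproduction of the bug: a function that references io.StringIO()
# raises NameError the first time the stack_info code path runs, unless the
# module has imported io at the top.

def find_caller_stack_info():
    sio = io.StringIO()  # NameError here when `io` was never imported
    sio.write("Stack (most recent call last):\n")
    return sio.getvalue()

try:
    find_caller_stack_info()
except NameError as exc:
    print("before the fix:", exc)  # name 'io' is not defined

import io  # the patch adds this import at the top of patches/log.py

print("after the fix:", find_caller_stack_info().strip())
```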
readthedocs__readthedocs.org-4676
Improve Intro and Getting Started documentation. I think the [introduction](https://docs.readthedocs.io/en/latest/) and [getting started guide](https://docs.readthedocs.io/en/latest/getting_started.html) could use a few improvements to make it easier for brand new users who may not already know about Sphinx/MkDocs/Markdown/RestructuredText and are just looking for a guide on how to write some docs. I also think our introduction could stand some improvements to point users in the right direction. We have a lot of docs, but a few layout and explanation improvements will help users find the right section for them.

Here are some specific goals and improvements:

- Make it easier to start a brand new docs project
  * Have a getting started guide for Sphinx
  * Have a getting started guide for Sphinx with commonmark
  * Have a getting started guide for MkDocs
  * Explain the why behind the above technologies
- Improve the intro paragraphs ("Read the Docs hosts documentation for...") on the index page to explain RTD's value proposition and why somebody should choose Read the Docs.
- Full sentence/paragraph descriptions of the different sections (e.g. User documentation) rather than just a big toctree.
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.9.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read the Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\n# html_static_path = ['_static']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n", "path": "docs/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.9.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read the Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n", "path": "docs/conf.py" } ]
diff --git a/docs/_static/images/first-steps/import-a-repository.png b/docs/_static/images/first-steps/import-a-repository.png new file mode 100644 index 00000000000..252f69834df Binary files /dev/null and b/docs/_static/images/first-steps/import-a-repository.png differ diff --git a/docs/_static/images/first-steps/mkdocs-hello-world.png b/docs/_static/images/first-steps/mkdocs-hello-world.png new file mode 100644 index 00000000000..0c1bde0bf17 Binary files /dev/null and b/docs/_static/images/first-steps/mkdocs-hello-world.png differ diff --git a/docs/_static/images/first-steps/sphinx-hello-world.png b/docs/_static/images/first-steps/sphinx-hello-world.png new file mode 100644 index 00000000000..cc7639664cf Binary files /dev/null and b/docs/_static/images/first-steps/sphinx-hello-world.png differ diff --git a/docs/conf.py b/docs/conf.py index 71d66d0a881..1cb99137f38 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -71,7 +71,7 @@ gettext_compact = False html_theme = 'sphinx_rtd_theme' -# html_static_path = ['_static'] +html_static_path = ['_static'] html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_logo = 'img/logo.svg' html_theme_options = { diff --git a/docs/connected-accounts.rst b/docs/connected-accounts.rst new file mode 100644 index 00000000000..3db7ed6ca51 --- /dev/null +++ b/docs/connected-accounts.rst @@ -0,0 +1,20 @@ +Connecting Your Account +----------------------- + +If you are going to import repositories from GitHub, Bitbucket, or GitLab, +you should connect your Read the Docs account to your repository host first. +Connecting your account allows for: + +* Easier importing of your repositories +* Automatically configure your repository :doc:`webhooks` + which allow Read the Docs to build your docs on every change to your repository +* Log into Read the Docs with your GitHub, Bitbucket, or GitLab credentials + +If you signed up or logged in to Read the Docs with your GitHub, Bitbucket, or GitLab +credentials, you're all done. Your account is connected. + +To connect your unconnected account, go to your *Settings* dashboard +and select `Connected Services <https://readthedocs.org/accounts/social/connections/>`_. +From here, you'll be able to connect to your GitHub, Bitbucket or GitLab +account. This process will ask you to authorize a connection to Read the Docs, +that allows us to read information about and clone your repositories. diff --git a/docs/getting_started.rst b/docs/getting_started.rst deleted file mode 100644 index 3eb2330a9bd..00000000000 --- a/docs/getting_started.rst +++ /dev/null @@ -1,151 +0,0 @@ -Getting Started -=============== - -This document will show you how to get up and running with Read the Docs. -You will have your docs imported on Read the Docs in 5 minutes, -displayed beautifully for the world. - -If you are already using Sphinx or Markdown for your docs, skip ahead to -:ref:`getting_started:Import Your Docs`. - -Write Your Docs ---------------- - -You have two options for formatting your documentation: - -* `In reStructuredText`_ -* `In Markdown`_ - -In reStructuredText -~~~~~~~~~~~~~~~~~~~ - -There is `a screencast`_ that will help you get started if you prefer. - -Sphinx_ is a tool that makes it easy to create beautiful documentation. 
-Assuming you have Python_ already, `install Sphinx`_:: - - $ pip install sphinx sphinx-autobuild - -Create a directory inside your project to hold your docs:: - - $ cd /path/to/project - $ mkdir docs - -Run ``sphinx-quickstart`` in there:: - - $ cd docs - $ sphinx-quickstart - -This quick start will walk you through creating the basic configuration; in most cases, you -can just accept the defaults. When it's done, you'll have an ``index.rst``, a -``conf.py`` and some other files. Add these to revision control. - -Now, edit your ``index.rst`` and add some information about your project. -Include as much detail as you like (refer to the reStructuredText_ syntax -or `this template`_ if you need help). Build them to see how they look:: - - $ make html - -.. note:: You can use ``sphinx-autobuild`` to auto-reload your docs. Run ``sphinx-autobuild . _build/html`` instead. - -Edit your files and rebuild until you like what you see, then commit your changes and push to your public repository. -Once you have Sphinx documentation in a public repository, you can start using Read the Docs. - -In Markdown -~~~~~~~~~~~ - -You can use Markdown and reStructuredText in the same Sphinx project. -We support this natively on Read the Docs, and you can do it locally:: - - $ pip install recommonmark - -Then in your ``conf.py``: - -.. code-block:: python - - from recommonmark.parser import CommonMarkParser - - source_parsers = { - '.md': CommonMarkParser, - } - - source_suffix = ['.rst', '.md'] - -.. note:: Markdown doesn't support a lot of the features of Sphinx, - like inline markup and directives. However, it works for - basic prose content. reStructuredText is the preferred - format for technical documentation, please read `this blog post`_ - for motivation. - -.. _this blog post: http://ericholscher.com/blog/2016/mar/15/dont-use-markdown-for-technical-docs/ - -Sign Up and Connect an External Account ---------------------------------------- - -If you are going to import a repository from GitHub, Bitbucket or GitLab, you should -connect your account to your provider first. Connecting your account allows for -easier importing and enables Read the Docs to configure your repository webhooks -automatically. - -To connect your account, go to your *Settings* dashboard and select *Connected -Services*. From here, you'll be able to connect to your GitHub, Bitbucket or GitLab -account. This process will ask you to authorize a connection to Read the Docs, -that allows us to read information about and clone your repositories. - -Import Your Docs ----------------- - -To import a repository, visit your dashboard_ and click Import_. - -If you have a connected account, you will see a list of your repositories that -we are able to import. To import one of these projects, just click the import -icon next to the repository you'd like to import. This will bring up a form that -is already filled with your project's information. Feel free to edit any of -these properties, and the click **Next** to build your documentation. - -Manually Import Your Docs -~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you do not have a connected account, you will need select **Import Manually** -and enter the information for your repository yourself. You will also need to -manually configure the webhook for your repository as well. When importing your -project, you will be asked for the repository URL, along with some other -information for you new project. The URL is normally the URL or path name you'd -use to checkout, clone, or branch your repository. 
Some examples: - -* Git: ``http://github.com/ericholscher/django-kong.git`` -* Mercurial: ``https://bitbucket.org/ianb/pip`` -* Subversion: ``http://varnish-cache.org/svn/trunk`` -* Bazaar: ``lp:pasta`` - -Add an optional homepage URL and some tags, and then click **Next**. - -Once your project is created, you'll need to manually configure the repository -webhook if you would like to have new changesets to trigger builds for your -project on Read the Docs. Go to your project's **Integrations** page to -configure a new webhook, or see :ref:`our steps for webhook creation <webhooks:Webhook Creation>` -for more information on this process. - -Within a few seconds your code will automatically be fetched from your public repository, -and the documentation will be built. -Check out our :doc:`builds` page to learn more about how we build your docs, -and to troubleshoot any issues that arise. - -Read the Docs will host multiple versions of your code. You can read more about -how to use this well on our :doc:`versions` page. - -If you have any more trouble, don't hesitate to reach out to us. The :doc:`support` page has more information on getting in touch. - -.. _a screencast: https://youtu.be/oJsUvBQyHBs -.. _Python: https://www.python.org/ -.. _Sphinx: http://sphinx-doc.org/ -.. _Markdown: http://daringfireball.net/projects/markdown/syntax -.. _Mkdocs: http://www.mkdocs.org/ -.. _install Sphinx: http://sphinx-doc.org/latest/install.html -.. _install Mkdocs: http://www.mkdocs.org/#installation -.. _reStructuredText: http://sphinx-doc.org/rest.html -.. _this template: https://www.writethedocs.org/guide/writing/beginners-guide-to-docs/#id1 -.. _Sign up: https://readthedocs.org/accounts/signup -.. _log in: https://readthedocs.org/accounts/login -.. _dashboard: https://readthedocs.org/dashboard -.. _Import: https://readthedocs.org/dashboard/import diff --git a/docs/index.rst b/docs/index.rst index 4258087a11a..536ac4c516d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,34 +1,53 @@ -Welcome to Read The Docs -======================== +Read the Docs: Documentation Simplified +======================================= -`Read the Docs`_ hosts documentation for the open source community. -We support Sphinx_ docs written with reStructuredText_ and `CommonMark`_. -We pull your code from your Subversion_, Bazaar_, Git_, and Mercurial_ repositories. -Then we build documentation and host it for you. +`Read the Docs`_ simplifies software documentation +by automating building, versioning, and hosting of your docs for you. Think of it as *Continuous Documentation*. -The code is open source, and `available on GitHub`_. +Never out of sync + Whenever you push code to your favorite version control system, + whether that is Git, Mercurial, Bazaar, or Subversion, + Read the Docs will automatically build your docs + so your code and documentation are always up-to-date. + +Multiple versions + Read the Docs can host and build multiple versions of your docs + so having a 1.0 version of your docs and a 2.0 version + of your docs is as easy as having a separate branch or tag in your version control system. + +Free and open source + Read the Docs is free and open source and hosts documentation + for nearly 100,000 large and small open source projects + in almost every human and computer language. .. _Read the docs: http://readthedocs.org/ -.. _Sphinx: http://sphinx.pocoo.org/ -.. _reStructuredText: http://sphinx.pocoo.org/rest.html -.. _CommonMark: http://commonmark.org/ -.. _Subversion: http://subversion.tigris.org/ -.. 
_Bazaar: http://bazaar.canonical.com/ -.. _Git: http://git-scm.com/ -.. _Mercurial: https://www.mercurial-scm.org/ -.. _available on GitHub: http://github.com/rtfd/readthedocs.org -The main documentation for the site is organized into a couple sections: -* :ref:`user-docs` -* :ref:`feature-docs` -* :ref:`about-docs` +First steps +----------- + +Are you new to software documentation +or are you looking to use your existing docs with Read the Docs? +Learn about documentation authoring tools such as Sphinx and MkDocs +to help you create fantastic documentation for your project. + +* **Getting started**: + :doc:`With Sphinx <intro/getting-started-with-sphinx>` | + :doc:`With MkDocs <intro/getting-started-with-mkdocs>` + +* **Importing your existing documentation**: + :doc:`Import guide <intro/import-guide>` + +.. toctree:: + :maxdepth: 2 + :hidden: + :caption: First Steps -Information about development is also available: + intro/getting-started-with-sphinx + intro/getting-started-with-mkdocs + intro/import-guide -* :ref:`dev-docs` -* :ref:`design-docs` .. _user-docs: @@ -36,10 +55,10 @@ Information about development is also available: :maxdepth: 2 :caption: User Documentation - getting_started versions builds features + connected-accounts support faq yaml-config @@ -47,25 +66,6 @@ Information about development is also available: api/index embed -.. _about-docs: - -.. toctree:: - :maxdepth: 2 - :caption: About Read the Docs - - contribute - roadmap - team - gsoc - code-of-conduct - privacy-policy - advertising/index - sponsors - open-source-philosophy - story - abandoned-projects - dmca/index - .. _feature-docs: .. toctree:: @@ -87,11 +87,29 @@ Information about development is also available: automatic-redirects features/* +.. _about-docs: + +.. toctree:: + :maxdepth: 1 + :caption: About Read the Docs + + contribute + roadmap + team + gsoc + code-of-conduct + privacy-policy + advertising/index + sponsors + open-source-philosophy + story + abandoned-projects + dmca/index .. _dev-docs: .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: Developer Documentation changelog @@ -107,6 +125,8 @@ Information about development is also available: i18n issue-labels security + design + RTD Theme <https://sphinx-rtd-theme.readthedocs.io/en/latest/> .. _business-docs: @@ -123,13 +143,3 @@ Information about development is also available: :caption: Custom Install Documentation custom_installs/index - -.. _design-docs: - -.. toctree:: - :maxdepth: 2 - :caption: Designer Documentation - - design - Theme <https://sphinx-rtd-theme.readthedocs.io/en/latest/> - diff --git a/docs/install.rst b/docs/install.rst index 68c335882db..5aa2cbd1125 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -151,4 +151,4 @@ Importing your docs One of the goals of readthedocs.org is to make it easy for any open source developer to get high quality hosted docs with great visibility! Simply provide us with the clone URL to your repo, we'll pull your code, extract your docs, and build them! We make available a post-commit webhook that can be configured to update the docs whenever you commit to your repo. -See our :doc:`getting_started` page to learn more. +See :doc:`/intro/import-guide` to learn more. 
diff --git a/docs/intro/getting-started-with-mkdocs.rst b/docs/intro/getting-started-with-mkdocs.rst new file mode 100644 index 00000000000..04b09f945f7 --- /dev/null +++ b/docs/intro/getting-started-with-mkdocs.rst @@ -0,0 +1,73 @@ +Getting Started with MkDocs +=========================== + +MkDocs is a documentation generator that focuses on speed and simplicity. +It has many great features including: + +* Preview your documentation as you write it +* Easy customization with themes and extensions +* Writing documentation with Markdown + +.. note:: + + MkDocs is a great choice for building technical documentation. + However, Read the Docs also supports :doc:`Sphinx </intro/getting-started-with-sphinx>`, + another tool for writing and building documentation. + + +Quick start +----------- + +Assuming you have Python already, `install MkDocs`_: + +.. sourcecode:: bash + + $ pip install mkdocs + +Setup your MkDocs project: + +.. sourcecode:: bash + + $ mkdocs new . + +This command creates ``mkdocs.yml`` which holds your MkDocs configuration, +and ``docs/index.md`` which is the Markdown file +that is the entry point for your documentation. + +You can edit this ``index.md`` file to add more details about your project +and then you can build your documentation: + +.. sourcecode:: bash + + $ mkdocs serve + +This command builds your Markdown files into HTML +and starts a development server to browse your documentation. +Open up http://127.0.0.1:8000/ in your web browser to see your documentation. +You can make changes to your Markdown files and your docs will automatically rebuild. + +.. figure:: ../_static/images/first-steps/mkdocs-hello-world.png + :align: right + :figwidth: 300px + :target: ../_static/images/first-steps/mkdocs-hello-world.png + + Your MkDocs project is built + +Once you have your documentation in a public repository such as GitHub, Bitbucket, or GitLab, +you can start using Read the Docs by :doc:`importing your docs </intro/import-guide>`. + +.. _install MkDocs: https://www.mkdocs.org/#installation + + +External resources +------------------ + +Here are some external resources to help you learn more about MkDocs. + +* `MkDocs documentation`_ +* `Markdown syntax guide`_ +* `Writing your docs with MkDocs`_ + +.. _MkDocs documentation: https://www.mkdocs.org/ +.. _Markdown syntax guide: http://daringfireball.net/projects/markdown/syntax +.. _Writing your docs with MkDocs: https://www.mkdocs.org/user-guide/writing-your-docs/ diff --git a/docs/intro/getting-started-with-sphinx.rst b/docs/intro/getting-started-with-sphinx.rst new file mode 100644 index 00000000000..782cdd39b8a --- /dev/null +++ b/docs/intro/getting-started-with-sphinx.rst @@ -0,0 +1,127 @@ +Getting Started with Sphinx +=========================== + +Sphinx is a powerful documentation generator that +has many great features for writing technical documentation including: + +* Generate web pages, printable PDFs, documents for e-readers (ePub), + and more all from the same sources +* You can use reStructuredText or :ref:`Markdown <intro/getting-started-with-sphinx:Using Markdown with Sphinx>` + to write documentation +* An extensive system of cross-referencing code and documentation +* Syntax highlighted code samples +* A vibrant ecosystem of first and third-party extensions_ + +.. 
_extensions: http://www.sphinx-doc.org/en/master/ext/builtins.html#builtin-sphinx-extensions + + +Quick start video +----------------- + +This screencast will help you get started or you can +:ref:`read our guide below <intro/getting-started-with-sphinx:Quick start>`. + +.. raw:: html + + <div style="text-align: center; margin-bottom: 2em;"> + <iframe width="100%" height="350" src="https://www.youtube.com/embed/oJsUvBQyHBs?rel=0" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe> + </div> + + +Quick start +----------- + +Assuming you have Python already, `install Sphinx`_: + +.. sourcecode:: bash + + $ pip install sphinx + +Create a directory inside your project to hold your docs: + +.. sourcecode:: bash + + $ cd /path/to/project + $ mkdir docs + +Run ``sphinx-quickstart`` in there: + +.. sourcecode:: bash + + $ cd docs + $ sphinx-quickstart + +This quick start will walk you through creating the basic configuration; in most cases, you +can just accept the defaults. When it's done, you'll have an ``index.rst``, a +``conf.py`` and some other files. Add these to revision control. + +Now, edit your ``index.rst`` and add some information about your project. +Include as much detail as you like (refer to the reStructuredText_ syntax +or `this template`_ if you need help). Build them to see how they look: + +.. sourcecode:: bash + + $ make html + +Your ``index.rst`` has been built into ``index.html`` +in your documentation output directory (typically ``_build/html/index.html``). +Open this file in your web browser to see your docs. + +.. figure:: ../_static/images/first-steps/sphinx-hello-world.png + :align: right + :figwidth: 300px + :target: ../_static/images/first-steps/sphinx-hello-world.png + + Your Sphinx project is built + +Edit your files and rebuild until you like what you see, then commit your changes and push to your public repository. +Once you have Sphinx documentation in a public repository, you can start using Read the Docs +by :doc:`importing your docs </intro/import-guide>`. + +.. _install Sphinx: http://sphinx-doc.org/latest/install.html +.. _reStructuredText: http://sphinx-doc.org/rest.html +.. _this template: https://www.writethedocs.org/guide/writing/beginners-guide-to-docs/#id1 + +Using Markdown with Sphinx +-------------------------- + +You can use Markdown and reStructuredText in the same Sphinx project. +We support this natively on Read the Docs, and you can do it locally: + +.. sourcecode:: bash + + $ pip install recommonmark + +Then in your ``conf.py``: + +.. code-block:: python + + from recommonmark.parser import CommonMarkParser + + source_parsers = { + '.md': CommonMarkParser, + } + + source_suffix = ['.rst', '.md'] + +.. warning:: Markdown doesn't support a lot of the features of Sphinx, + like inline markup and directives. However, it works for + basic prose content. reStructuredText is the preferred + format for technical documentation, please read `this blog post`_ + for motivation. + +.. _this blog post: http://ericholscher.com/blog/2016/mar/15/dont-use-markdown-for-technical-docs/ + + +External resources +------------------ + +Here are some external resources to help you learn more about Sphinx. + +* `Sphinx documentation`_ +* `RestructuredText primer`_ +* `An introduction to Sphinx and Read the Docs for technical writers`_ + +.. _Sphinx documentation: http://www.sphinx-doc.org/ +.. _RestructuredText primer: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html +.. 
_An introduction to Sphinx and Read the Docs for technical writers: http://ericholscher.com/blog/2016/jul/1/sphinx-and-rtd-for-writers/ diff --git a/docs/intro/import-guide.rst b/docs/intro/import-guide.rst new file mode 100644 index 00000000000..b44cdfe4d67 --- /dev/null +++ b/docs/intro/import-guide.rst @@ -0,0 +1,68 @@ +Importing Your Documentation +============================ + +To import a documentation repository, visit your `Read the Docs dashboard`_ and click Import_. + +If you have :doc:`connected your Read the Docs account <../connected-accounts>` to GitHub, Bitbucket, or GitLab, +you will see a list of your repositories that we are able to import. +To import one of these projects, just click the import +icon next to the repository you'd like to import. This will bring up a form that +is already filled with your project's information. Feel free to edit any of +these properties, and the click **Next** to +:ref:`build your documentation <intro/import-guide:Building your documentation>`. + +.. _Read the Docs dashboard: https://readthedocs.org/dashboard +.. _Import: https://readthedocs.org/dashboard/import + + +.. figure:: ../_static/images/first-steps/import-a-repository.png + :align: right + :figwidth: 300px + :target: ../_static/images/first-steps/import-a-repository.png + + Importing a repository + + +Manually import your docs +------------------------- + +If you do not have a connected account, you will need select **Import Manually** +and enter the information for your repository yourself. You will also need to +manually configure the webhook for your repository as well. When importing your +project, you will be asked for the repository URL, along with some other +information for you new project. The URL is normally the URL or path name you'd +use to checkout, clone, or branch your repository. Some examples: + +* Git: ``https://github.com/ericholscher/django-kong.git`` +* Mercurial: ``https://bitbucket.org/ianb/pip`` +* Subversion: ``http://varnish-cache.org/svn/trunk`` +* Bazaar: ``lp:pasta`` + +Add an optional homepage URL and some tags, and then click **Next**. + +Once your project is created, you'll need to manually configure the repository +webhook if you would like to have new changes trigger builds for your +project on Read the Docs. Go to your project's **Integrations** page to +configure a new webhook, or see :ref:`our steps for webhook creation <webhooks:Webhook Creation>` +for more information on this process. + + +Building your documentation +--------------------------- + +Within a few seconds of completing the import process, +your code will automatically be fetched from your public repository, +and the documentation will be built. +Check out our :doc:`/builds` page to learn more about how Read the Docs builds your docs, +and to troubleshoot any issues that arise. + +Some documentation projects require additional configuration to build +such as specifying a certain version of Python or installing additional dependencies. +You can configure these settings in a ``readthedocs.yml`` file. +See our :doc:`/yaml-config` docs for more details. + +Read the Docs will host multiple versions of your code. You can read more about +how to use this well on our :doc:`/versions` page. + +If you have any more trouble, don't hesitate to reach out to us. +The :doc:`/support` page has more information on getting in touch. 
diff --git a/docs/webhooks.rst b/docs/webhooks.rst index 47b2e0e4aee..ecf18611b50 100644 --- a/docs/webhooks.rst +++ b/docs/webhooks.rst @@ -23,7 +23,7 @@ repository provider such as GitHub, GitLab, or Bitbucket. Webhook Creation ---------------- -If you import a project using a :ref:`connected account <getting_started:Sign Up and Connect an External Account>`, +If you have :doc:`connected your Read the Docs account </connected-accounts>` to GitHub, Bitbucket, or GitLab, a webhook will be set up automatically for your repository. However, if your project was not imported through a connected account, you may need to manually configure a webhook for your project.
huggingface__accelerate-127
Error when loading optimizer state Thanks for this awesome product!! When I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..! https://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86
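To make the failure concrete: `load_state_dict` reads `self.device_placement`, but `__init__` never stores that flag, so restoring optimizer state on TPU dies with `AttributeError`. The sketch below is a simplified stand-in (not the real `accelerate` class; the TPU transfer is reduced to a placeholder) showing the pattern and the one-line remedy, which is the same remedy the patch recorded later in this entry applies to `AcceleratedOptimizer.__init__`.

```python
import torch

# Simplified stand-in for the wrapper described above (not the real
# accelerate class): storing the flag in __init__ is what keeps
# load_state_dict from raising AttributeError later.
class OptimizerWrapperSketch:
    def __init__(self, optimizer, device_placement=True):
        self.optimizer = optimizer
        # One-line remedy: remember the constructor argument on the instance.
        self.device_placement = device_placement

    def load_state_dict(self, state_dict, on_tpu=False):
        # Without the assignment above, this attribute access is where the
        # reported AttributeError comes from when restoring state on TPU.
        if on_tpu and self.device_placement:
            pass  # placeholder for the CPU-to-TPU state transfer
        self.optimizer.load_state_dict(state_dict)


# Quick round trip on CPU (no TPU needed to exercise the attribute access):
model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
wrapped = OptimizerWrapperSketch(opt, device_placement=True)
wrapped.load_state_dict(opt.state_dict(), on_tpu=True)  # no AttributeError
```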
[ { "content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\nfrom .utils import honor_type\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n 
optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"This needs to be implemented at the end\"\"\"\n return False # TODO: implement it\n", "path": "src/accelerate/optimizer.py" } ]
[ { "content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\nfrom .utils import honor_type\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n self.device_placement = device_placement\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self, closure=None):\n if 
self.state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"This needs to be implemented at the end\"\"\"\n return False # TODO: implement it\n", "path": "src/accelerate/optimizer.py" } ]
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py index 30fd8314eb0..57ea686f91c 100644 --- a/src/accelerate/optimizer.py +++ b/src/accelerate/optimizer.py @@ -52,6 +52,7 @@ def __init__(self, optimizer, device_placement=True, scaler=None): self.optimizer = optimizer self.scaler = scaler self.state = AcceleratorState() + self.device_placement = device_placement # Handle device placement if device_placement:
NVIDIA__NVFlare-191
The "show_stats" command is broken The "show_stats server" and "show_stats client" commands fail with the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162): > show_stats server Error: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict' Done [7269 usecs] 2022-02-08 17:26:12.865006 >
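For context on the traceback: the reply processor exposes one `process_<type>` hook per reply item type, and the error message points at a dict-shaped reply reaching a processor that has no `process_dict` hook. The miniature below (class and function names are hypothetical, not the NVFlare API) illustrates that dispatch pattern and the no-op default that the patch recorded later in this entry adds to `ReplyProcessor`.

```python
# Hypothetical miniature of the reply-processing pattern (not NVFlare code):
# the dispatcher looks up a handler named after the reply item type, so the
# base class needs at least a no-op default for every type it may receive.
class ReplyProcessorSketch:
    def process_string(self, item: str) -> None:
        pass

    def process_dict(self, data: dict) -> None:
        # The missing default that the reported AttributeError points at;
        # adding it lets dict-shaped replies (e.g. from "show_stats") pass.
        pass


def dispatch_reply(processor: ReplyProcessorSketch, item_type: str, payload) -> None:
    # Without process_dict on the base class, this getattr raises
    # AttributeError for item_type == "dict".
    getattr(processor, "process_" + item_type)(payload)


dispatch_reply(ReplyProcessorSketch(), "dict", {"status": "OK"})
```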
[ { "content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py" } ]
[ { "content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_dict(self, api: AdminAPISpec, data: dict):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py" } ]
diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py index cb5dacd40e..5d8b9a1c18 100644 --- a/nvflare/fuel/hci/client/api_spec.py +++ b/nvflare/fuel/hci/client/api_spec.py @@ -38,6 +38,9 @@ def process_error(self, api: AdminAPISpec, err: str): def process_table(self, api: AdminAPISpec, table: Table): pass + def process_dict(self, api: AdminAPISpec, data: dict): + pass + def process_shutdown(self, api: AdminAPISpec, msg: str): pass
holoviz__holoviews-3427
Bokeh streams callback fails with convert_timestamp in Python 2.7 Discovered this while writing a BoundsX stream to review the dates of selected data points in a time series. It errors out and kills the stream.
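The root cause is a Python 2/3 difference: `datetime.timezone` was added in Python 3.2, so `dt.datetime.fromtimestamp(timestamp/1000., dt.timezone.utc)` raises `AttributeError` under Python 2.7. Below is a small, self-contained sketch of a version-agnostic conversion; it mirrors the approach taken in the patch recorded later in this entry (switching to `utcfromtimestamp`), shown here as a standalone function rather than the HoloViews one.

```python
import datetime as dt

import numpy as np


def convert_timestamp_sketch(timestamp):
    """Convert a bokehJS millisecond timestamp to numpy.datetime64 without
    touching dt.timezone, which does not exist on Python 2.7."""
    naive_utc = dt.datetime.utcfromtimestamp(timestamp / 1000.)
    return np.datetime64(naive_utc)


# 1546300800000 ms since the epoch is 2019-01-01T00:00:00 UTC.
print(convert_timestamp_sketch(1546300800000))  # -> 2019-01-01T00:00:00.000000
```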
[ { "content": "from __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport time\nimport sys\nimport calendar\nimport datetime as dt\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport param\nimport bokeh\nimport numpy as np\n\nfrom bokeh.core.json_encoder import serialize_json # noqa (API import)\nfrom bokeh.core.properties import value\nfrom bokeh.layouts import WidgetBox, Row, Column\nfrom bokeh.models import tools\nfrom bokeh.models import Model, ToolbarBox, FactorRange, Range1d, Plot, Spacer, CustomJS\nfrom bokeh.models.widgets import DataTable, Tabs, Div\nfrom bokeh.plotting import Figure\nfrom bokeh.themes.theme import Theme\n\ntry:\n from bokeh.themes import built_in_themes\nexcept:\n built_in_themes = {}\n\ntry:\n from bkcharts import Chart\nexcept:\n Chart = type(None) # Create stub for isinstance check\n\nfrom ...core.overlay import Overlay\nfrom ...core.util import (\n LooseVersion, _getargspec, basestring, callable_name, cftime_types,\n cftime_to_timestamp, pd, unique_array)\nfrom ...core.spaces import get_nested_dmaps, DynamicMap\nfrom ..util import dim_axis_label\n\nbokeh_version = LooseVersion(bokeh.__version__) # noqa\n\n\nTOOL_TYPES = {\n 'pan': tools.PanTool,\n 'xpan': tools.PanTool,\n 'ypan': tools.PanTool,\n 'xwheel_pan': tools.WheelPanTool,\n 'ywheel_pan': tools.WheelPanTool,\n 'wheel_zoom': tools.WheelZoomTool,\n 'xwheel_zoom': tools.WheelZoomTool,\n 'ywheel_zoom': tools.WheelZoomTool,\n 'zoom_in': tools.ZoomInTool,\n 'xzoom_in': tools.ZoomInTool,\n 'yzoom_in': tools.ZoomInTool,\n 'zoom_out': tools.ZoomOutTool,\n 'xzoom_out': tools.ZoomOutTool,\n 'yzoom_out': tools.ZoomOutTool,\n 'click': tools.TapTool,\n 'tap': tools.TapTool,\n 'crosshair': tools.CrosshairTool,\n 'box_select': tools.BoxSelectTool,\n 'xbox_select': tools.BoxSelectTool,\n 'ybox_select': tools.BoxSelectTool,\n 'poly_select': tools.PolySelectTool,\n 'lasso_select': tools.LassoSelectTool,\n 'box_zoom': tools.BoxZoomTool,\n 'xbox_zoom': tools.BoxZoomTool,\n 'ybox_zoom': tools.BoxZoomTool,\n 'hover': tools.HoverTool,\n 'save': tools.SaveTool,\n 'undo': tools.UndoTool,\n 'redo': tools.RedoTool,\n 'reset': tools.ResetTool,\n 'help': tools.HelpTool,\n 'box_edit': tools.BoxEditTool,\n 'point_draw': tools.PointDrawTool,\n 'poly_draw': tools.PolyDrawTool,\n 'poly_edit': tools.PolyEditTool,\n 'freehand_draw': tools.FreehandDrawTool\n}\n\n\ndef convert_timestamp(timestamp):\n \"\"\"\n Converts bokehJS timestamp to datetime64.\n \"\"\"\n datetime = dt.datetime.fromtimestamp(timestamp/1000., dt.timezone.utc)\n return np.datetime64(datetime.replace(tzinfo=None))\n\n\ndef decode_bytes(array):\n \"\"\"\n Decodes an array, list or tuple of bytestrings to avoid python 3\n bokeh serialization errors\n \"\"\"\n if (sys.version_info.major == 2 or not len(array) or\n (isinstance(array, np.ndarray) and array.dtype.kind != 'O')):\n return array\n decoded = [v.decode('utf-8') if isinstance(v, bytes) else v for v in array]\n if isinstance(array, np.ndarray):\n return np.asarray(decoded)\n elif isinstance(array, tuple):\n return tuple(decoded)\n return decoded\n\n\ndef layout_padding(plots, renderer):\n \"\"\"\n Pads Nones in a list of lists of plots with empty plots.\n \"\"\"\n widths, heights = defaultdict(int), defaultdict(int)\n for r, row in enumerate(plots):\n for c, p in enumerate(row):\n if p is not None:\n width, height = renderer.get_size(p)\n widths[c] = max(widths[c], width)\n heights[r] = max(heights[r], height)\n\n expanded_plots = []\n for r, row in 
enumerate(plots):\n expanded_plots.append([])\n for c, p in enumerate(row):\n if p is None:\n p = empty_plot(widths[c], heights[r])\n elif hasattr(p, 'plot_width') and p.plot_width == 0 and p.plot_height == 0:\n p.plot_width = widths[c]\n p.plot_height = heights[r]\n expanded_plots[r].append(p)\n return expanded_plots\n\n\ndef compute_plot_size(plot):\n \"\"\"\n Computes the size of bokeh models that make up a layout such as\n figures, rows, columns, widgetboxes and Plot.\n \"\"\"\n if isinstance(plot, (Div, ToolbarBox)):\n # Cannot compute size for Div or ToolbarBox\n return 0, 0\n elif isinstance(plot, (Row, Column, WidgetBox, Tabs)):\n if not plot.children: return 0, 0\n if isinstance(plot, Row) or (isinstance(plot, ToolbarBox) and plot.toolbar_location not in ['right', 'left']):\n w_agg, h_agg = (np.sum, np.max)\n elif isinstance(plot, Tabs):\n w_agg, h_agg = (np.max, np.max)\n else:\n w_agg, h_agg = (np.max, np.sum)\n widths, heights = zip(*[compute_plot_size(child) for child in plot.children])\n width, height = w_agg(widths), h_agg(heights)\n elif isinstance(plot, (Figure, Chart)):\n width, height = plot.plot_width, plot.plot_height\n elif isinstance(plot, (Plot, DataTable, Spacer)):\n width, height = plot.width, plot.height\n return width, height\n\n\ndef empty_plot(width, height):\n \"\"\"\n Creates an empty and invisible plot of the specified size.\n \"\"\"\n x_range = Range1d(start=0, end=1)\n y_range = Range1d(start=0, end=1)\n p = Figure(plot_width=width, plot_height=height,\n x_range=x_range, y_range=y_range)\n p.xaxis.visible = False\n p.yaxis.visible = False\n p.outline_line_alpha = 0\n p.grid.grid_line_alpha = 0\n return p\n\n\ndef font_size_to_pixels(size):\n \"\"\"\n Convert a fontsize to a pixel value\n \"\"\"\n if size is None or not isinstance(size, basestring):\n return\n conversions = {'em': 16, 'pt': 16/12.}\n val = re.findall('\\d+', size)\n unit = re.findall('[a-z]+', size)\n if (val and not unit) or (val and unit[0] == 'px'):\n return int(val[0])\n elif val and unit[0] in conversions:\n return (int(int(val[0]) * conversions[unit[0]]))\n\n\ndef make_axis(axis, size, factors, dim, flip=False, rotation=0,\n label_size=None, tick_size=None, axis_height=35):\n factors = list(map(dim.pprint_value, factors))\n nchars = np.max([len(f) for f in factors])\n ranges = FactorRange(factors=factors)\n ranges2 = Range1d(start=0, end=1)\n axis_label = dim_axis_label(dim)\n reset = \"range.setv({start: 0, end: range.factors.length})\"\n ranges.callback = CustomJS(args=dict(range=ranges), code=reset)\n\n axis_props = {}\n if label_size:\n axis_props['axis_label_text_font_size'] = value(label_size)\n if tick_size:\n axis_props['major_label_text_font_size'] = value(tick_size)\n\n tick_px = font_size_to_pixels(tick_size)\n if tick_px is None:\n tick_px = 8\n label_px = font_size_to_pixels(label_size)\n if label_px is None:\n label_px = 10\n\n rotation = np.radians(rotation)\n if axis == 'x':\n align = 'center'\n # Adjust height to compensate for label rotation\n height = int(axis_height + np.abs(np.sin(rotation)) *\n ((nchars*tick_px)*0.82)) + tick_px + label_px\n opts = dict(x_axis_type='auto', x_axis_label=axis_label,\n x_range=ranges, y_range=ranges2, plot_height=height,\n plot_width=size)\n else:\n # Adjust width to compensate for label rotation\n align = 'left' if flip else 'right'\n width = int(axis_height + np.abs(np.cos(rotation)) *\n ((nchars*tick_px)*0.82)) + tick_px + label_px\n opts = dict(y_axis_label=axis_label, x_range=ranges2,\n y_range=ranges, plot_width=width, 
plot_height=size)\n\n p = Figure(toolbar_location=None, tools=[], **opts)\n p.outline_line_alpha = 0\n p.grid.grid_line_alpha = 0\n\n if axis == 'x':\n p.yaxis.visible = False\n axis = p.xaxis[0]\n if flip:\n p.above = p.below\n p.below = []\n p.xaxis[:] = p.above\n else:\n p.xaxis.visible = False\n axis = p.yaxis[0]\n if flip:\n p.right = p.left\n p.left = []\n p.yaxis[:] = p.right\n axis.major_label_orientation = rotation\n axis.major_label_text_align = align\n axis.major_label_text_baseline = 'middle'\n axis.update(**axis_props)\n return p\n\n\ndef hsv_to_rgb(hsv):\n \"\"\"\n Vectorized HSV to RGB conversion, adapted from:\n http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion\n \"\"\"\n h, s, v = (hsv[..., i] for i in range(3))\n shape = h.shape\n i = np.int_(h*6.)\n f = h*6.-i\n\n q = f\n t = 1.-f\n i = np.ravel(i)\n f = np.ravel(f)\n i%=6\n\n t = np.ravel(t)\n q = np.ravel(q)\n s = np.ravel(s)\n v = np.ravel(v)\n\n clist = (1-s*np.vstack([np.zeros_like(f),np.ones_like(f),q,t]))*v\n\n #0:v 1:p 2:q 3:t\n order = np.array([[0,3,1],[2,0,1],[1,0,3],[1,2,0],[3,1,0],[0,1,2]])\n rgb = clist[order[i], np.arange(np.prod(shape))[:,None]]\n\n return rgb.reshape(shape+(3,))\n\n\ndef pad_width(model, table_padding=0.85, tabs_padding=1.2):\n \"\"\"\n Computes the width of a model and sets up appropriate padding\n for Tabs and DataTable types.\n \"\"\"\n if isinstance(model, Row):\n vals = [pad_width(child) for child in model.children]\n width = np.max([v for v in vals if v is not None])\n elif isinstance(model, Column):\n vals = [pad_width(child) for child in model.children]\n width = np.sum([v for v in vals if v is not None])\n elif isinstance(model, Tabs):\n vals = [pad_width(t) for t in model.tabs]\n width = np.max([v for v in vals if v is not None])\n for model in model.tabs:\n model.width = width\n width = int(tabs_padding*width)\n elif isinstance(model, DataTable):\n width = model.width\n model.width = int(table_padding*width)\n elif isinstance(model, (WidgetBox, Div)):\n width = model.width\n elif model:\n width = model.plot_width\n else:\n width = 0\n return width\n\n\ndef pad_plots(plots):\n \"\"\"\n Accepts a grid of bokeh plots in form of a list of lists and\n wraps any DataTable or Tabs in a WidgetBox with appropriate\n padding. 
Required to avoid overlap in gridplot.\n \"\"\"\n widths = []\n for row in plots:\n row_widths = []\n for p in row:\n width = pad_width(p)\n row_widths.append(width)\n widths.append(row_widths)\n plots = [[WidgetBox(p, width=w) if isinstance(p, (DataTable, Tabs)) else p\n for p, w in zip(row, ws)] for row, ws in zip(plots, widths)]\n return plots\n\n\ndef filter_toolboxes(plots):\n \"\"\"\n Filters out toolboxes out of a list of plots to be able to compose\n them into a larger plot.\n \"\"\"\n if isinstance(plots, list):\n plots = [filter_toolboxes(plot) for plot in plots]\n elif hasattr(plots, 'children'):\n plots.children = [filter_toolboxes(child) for child in plots.children\n if not isinstance(child, ToolbarBox)]\n return plots\n\n\ndef py2js_tickformatter(formatter, msg=''):\n \"\"\"\n Uses flexx.pyscript to compile a python tick formatter to JS code\n \"\"\"\n try:\n from flexx.pyscript import py2js\n except ImportError:\n param.main.param.warning(\n msg+'Ensure Flexx is installed (\"conda install -c bokeh flexx\" '\n 'or \"pip install flexx\")')\n return\n try:\n jscode = py2js(formatter, 'formatter')\n except Exception as e:\n error = 'Pyscript raised an error: {0}'.format(e)\n error = error.replace('%', '%%')\n param.main.param.warning(msg+error)\n return\n\n args = _getargspec(formatter).args\n arg_define = 'var %s = tick;' % args[0] if args else ''\n return_js = 'return formatter();\\n'\n jsfunc = '\\n'.join([arg_define, jscode, return_js])\n match = re.search('(formatter \\= function \\(.*\\))', jsfunc )\n return jsfunc[:match.start()] + 'formatter = function ()' + jsfunc[match.end():]\n\n\ndef get_tab_title(key, frame, overlay):\n \"\"\"\n Computes a title for bokeh tabs from the key in the overlay, the\n element and the containing (Nd)Overlay.\n \"\"\"\n if isinstance(overlay, Overlay):\n if frame is not None:\n title = []\n if frame.label:\n title.append(frame.label)\n if frame.group != frame.params('group').default:\n title.append(frame.group)\n else:\n title.append(frame.group)\n else:\n title = key\n title = ' '.join(title)\n else:\n title = ' | '.join([d.pprint_value_string(k) for d, k in\n zip(overlay.kdims, key)])\n return title\n\n\n\ndef filter_batched_data(data, mapping):\n \"\"\"\n Iterates over the data and mapping for a ColumnDataSource and\n replaces columns with repeating values with a scalar. This is\n purely and optimization for scalar types.\n \"\"\"\n for k, v in list(mapping.items()):\n if isinstance(v, dict) and 'field' in v:\n if 'transform' in v:\n continue\n v = v['field']\n elif not isinstance(v, basestring):\n continue\n values = data[v]\n try:\n if len(unique_array(values)) == 1:\n mapping[k] = values[0]\n del data[v]\n except:\n pass\n\ndef cds_column_replace(source, data):\n \"\"\"\n Determine if the CDS.data requires a full replacement or simply\n needs to be updated. 
A replacement is required if untouched\n columns are not the same length as the columns being updated.\n \"\"\"\n current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))]\n new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))]\n untouched = [k for k in source.data if k not in data]\n return bool(untouched and current_length and new_length and current_length[0] != new_length[0])\n\n\n@contextmanager\ndef hold_policy(document, policy, server=False):\n \"\"\"\n Context manager to temporary override the hold policy.\n \"\"\"\n old_policy = document._hold\n document._hold = policy\n try:\n yield\n finally:\n if server and not old_policy:\n document.unhold()\n else:\n document._hold = old_policy\n\n\ndef recursive_model_update(model, props):\n \"\"\"\n Recursively updates attributes on a model including other\n models. If the type of the new model matches the old model\n properties are simply updated, otherwise the model is replaced.\n \"\"\"\n updates = {}\n valid_properties = model.properties_with_values()\n for k, v in props.items():\n if isinstance(v, Model):\n nested_model = getattr(model, k)\n if type(v) is type(nested_model):\n nested_props = v.properties_with_values(include_defaults=False)\n recursive_model_update(nested_model, nested_props)\n else:\n setattr(model, k, v)\n elif k in valid_properties and v != valid_properties[k]:\n updates[k] = v\n model.update(**updates)\n\n\ndef update_shared_sources(f):\n \"\"\"\n Context manager to ensures data sources shared between multiple\n plots are cleared and updated appropriately avoiding warnings and\n allowing empty frames on subplots. Expects a list of\n shared_sources and a mapping of the columns expected columns for\n each source in the plots handles.\n \"\"\"\n def wrapper(self, *args, **kwargs):\n source_cols = self.handles.get('source_cols', {})\n shared_sources = self.handles.get('shared_sources', [])\n for source in shared_sources:\n source.data.clear()\n if self.document and self.document._held_events:\n self.document._held_events = self.document._held_events[:-1]\n\n ret = f(self, *args, **kwargs)\n\n for source in shared_sources:\n expected = source_cols[id(source)]\n found = [c for c in expected if c in source.data]\n empty = np.full_like(source.data[found[0]], np.NaN) if found else []\n patch = {c: empty for c in expected if c not in source.data}\n source.data.update(patch)\n return ret\n return wrapper\n\n\ndef categorize_array(array, dim):\n \"\"\"\n Uses a Dimension instance to convert an array of values to categorical\n (i.e. 
string) values and applies escaping for colons, which bokeh\n treats as a categorical suffix.\n \"\"\"\n return np.array([dim.pprint_value(x) for x in array])\n\n\nclass periodic(object):\n \"\"\"\n Mocks the API of periodic Thread in hv.core.util, allowing a smooth\n API transition on bokeh server.\n \"\"\"\n\n def __init__(self, document):\n self.document = document\n self.callback = None\n self.period = None\n self.count = None\n self.counter = None\n self._start_time = None\n self.timeout = None\n\n @property\n def completed(self):\n return self.counter is None\n\n def start(self):\n self._start_time = time.time()\n if self.document is None:\n raise RuntimeError('periodic was registered to be run on bokeh'\n 'server but no document was found.')\n self.document.add_periodic_callback(self._periodic_callback, self.period)\n\n def __call__(self, period, count, callback, timeout=None, block=False):\n if isinstance(count, int):\n if count < 0: raise ValueError('Count value must be positive')\n elif not type(count) is type(None):\n raise ValueError('Count value must be a positive integer or None')\n\n self.callback = callback\n self.period = period*1000.\n self.timeout = timeout\n self.count = count\n self.counter = 0\n return self\n\n def _periodic_callback(self):\n self.callback(self.counter)\n self.counter += 1\n\n if self.timeout is not None:\n dt = (time.time() - self._start_time)\n if dt > self.timeout:\n self.stop()\n if self.counter == self.count:\n self.stop()\n\n def stop(self):\n self.counter = None\n self.timeout = None\n try:\n self.document.remove_periodic_callback(self._periodic_callback)\n except ValueError: # Already stopped\n pass\n\n def __repr__(self):\n return 'periodic(%s, %s, %s)' % (self.period,\n self.count,\n callable_name(self.callback))\n def __str__(self):\n return repr(self)\n\n\ndef attach_periodic(plot):\n \"\"\"\n Attaches plot refresh to all streams on the object.\n \"\"\"\n def append_refresh(dmap):\n for dmap in get_nested_dmaps(dmap):\n dmap.periodic._periodic_util = periodic(plot.document)\n return plot.hmap.traverse(append_refresh, [DynamicMap])\n\n\ndef date_to_integer(date):\n \"\"\"Converts support date types to milliseconds since epoch\n\n Attempts highest precision conversion of different datetime\n formats to milliseconds since the epoch (1970-01-01 00:00:00).\n If datetime is a cftime with a non-standard calendar the\n caveats described in hv.core.util.cftime_to_timestamp apply.\n\n Args:\n date: Date- or datetime-like object\n\n Returns:\n Milliseconds since 1970-01-01 00:00:00\n \"\"\"\n if pd and isinstance(date, pd.Timestamp):\n try:\n date = date.to_datetime64()\n except:\n date = date.to_datetime()\n\n if isinstance(date, np.datetime64):\n return date.astype('datetime64[ms]').astype(float)\n elif isinstance(date, cftime_types):\n return cftime_to_timestamp(date, 'ms')\n\n if hasattr(date, 'timetuple'):\n dt_int = calendar.timegm(date.timetuple())*1000\n else:\n raise ValueError('Datetime type not recognized')\n return dt_int\n\n\ndef glyph_order(keys, draw_order=[]):\n \"\"\"\n Orders a set of glyph handles using regular sort and an explicit\n sort order. The explicit draw order must take the form of a list\n of glyph names while the keys should be glyph names with a custom\n suffix. 
The draw order may only match subset of the keys and any\n matched items will take precedence over other entries.\n \"\"\"\n keys = sorted(keys)\n def order_fn(glyph):\n matches = [item for item in draw_order if glyph.startswith(item)]\n return ((draw_order.index(matches[0]), glyph) if matches else\n (1e9+keys.index(glyph), glyph))\n return sorted(keys, key=order_fn)\n\n\ndef colormesh(X, Y):\n \"\"\"\n Generates line paths for a quadmesh given 2D arrays of X and Y\n coordinates.\n \"\"\"\n X1 = X[0:-1, 0:-1].ravel()\n Y1 = Y[0:-1, 0:-1].ravel()\n X2 = X[1:, 0:-1].ravel()\n Y2 = Y[1:, 0:-1].ravel()\n X3 = X[1:, 1:].ravel()\n Y3 = Y[1:, 1:].ravel()\n X4 = X[0:-1, 1:].ravel()\n Y4 = Y[0:-1, 1:].ravel()\n\n X = np.column_stack([X1, X2, X3, X4, X1])\n Y = np.column_stack([Y1, Y2, Y3, Y4, Y1])\n return X, Y\n\n\ndef theme_attr_json(theme, attr):\n if isinstance(theme, str) and theme in built_in_themes:\n return built_in_themes[theme]._json['attrs'].get(attr, {})\n elif isinstance(theme, Theme):\n return theme._json['attrs'].get(attr, {})\n else:\n return {}\n\n\ndef multi_polygons_data(element):\n \"\"\"\n Expands polygon data which contains holes to a bokeh multi_polygons\n representation. Multi-polygons split by nans are expanded and the\n correct list of holes is assigned to each sub-polygon.\n \"\"\"\n paths = element.split(datatype='array', dimensions=element.kdims)\n xs, ys = ([path[:, idx] for path in paths] for idx in (0, 1))\n holes = element.holes()\n xsh, ysh = [], []\n for x, y, multi_hole in zip(xs, ys, holes):\n xhs = [[h[:, 0] for h in hole] for hole in multi_hole]\n yhs = [[h[:, 1] for h in hole] for hole in multi_hole]\n array = np.column_stack([x, y])\n splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]\n arrays = np.split(array, splits+1) if len(splits) else [array]\n multi_xs, multi_ys = [], []\n for i, (path, hx, hy) in enumerate(zip(arrays, xhs, yhs)):\n if i != (len(arrays)-1):\n path = path[:-1]\n multi_xs.append([path[:, 0]]+hx)\n multi_ys.append([path[:, 1]]+hy)\n xsh.append(multi_xs)\n ysh.append(multi_ys)\n return xsh, ysh\n\n\ndef match_dim_specs(specs1, specs2):\n \"\"\"Matches dimension specs used to link axes.\n\n Axis dimension specs consists of a list of tuples corresponding\n to each dimension, each tuple spec has the form (name, label, unit).\n The name and label must match exactly while the unit only has to\n match if both specs define one.\n \"\"\"\n if (specs1 is None or specs2 is None) or (len(specs1) != len(specs2)):\n return False\n for spec1, spec2 in zip(specs1, specs2):\n for s1, s2 in zip(spec1, spec2):\n if s1 is None or s2 is None:\n continue\n if s1 != s2:\n return False\n return True\n", "path": "holoviews/plotting/bokeh/util.py" } ]
[ { "content": "from __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport time\nimport sys\nimport calendar\nimport datetime as dt\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport param\nimport bokeh\nimport numpy as np\n\nfrom bokeh.core.json_encoder import serialize_json # noqa (API import)\nfrom bokeh.core.properties import value\nfrom bokeh.layouts import WidgetBox, Row, Column\nfrom bokeh.models import tools\nfrom bokeh.models import Model, ToolbarBox, FactorRange, Range1d, Plot, Spacer, CustomJS\nfrom bokeh.models.widgets import DataTable, Tabs, Div\nfrom bokeh.plotting import Figure\nfrom bokeh.themes.theme import Theme\n\ntry:\n from bokeh.themes import built_in_themes\nexcept:\n built_in_themes = {}\n\ntry:\n from bkcharts import Chart\nexcept:\n Chart = type(None) # Create stub for isinstance check\n\nfrom ...core.overlay import Overlay\nfrom ...core.util import (\n LooseVersion, _getargspec, basestring, callable_name, cftime_types,\n cftime_to_timestamp, pd, unique_array)\nfrom ...core.spaces import get_nested_dmaps, DynamicMap\nfrom ..util import dim_axis_label\n\nbokeh_version = LooseVersion(bokeh.__version__) # noqa\n\n\nTOOL_TYPES = {\n 'pan': tools.PanTool,\n 'xpan': tools.PanTool,\n 'ypan': tools.PanTool,\n 'xwheel_pan': tools.WheelPanTool,\n 'ywheel_pan': tools.WheelPanTool,\n 'wheel_zoom': tools.WheelZoomTool,\n 'xwheel_zoom': tools.WheelZoomTool,\n 'ywheel_zoom': tools.WheelZoomTool,\n 'zoom_in': tools.ZoomInTool,\n 'xzoom_in': tools.ZoomInTool,\n 'yzoom_in': tools.ZoomInTool,\n 'zoom_out': tools.ZoomOutTool,\n 'xzoom_out': tools.ZoomOutTool,\n 'yzoom_out': tools.ZoomOutTool,\n 'click': tools.TapTool,\n 'tap': tools.TapTool,\n 'crosshair': tools.CrosshairTool,\n 'box_select': tools.BoxSelectTool,\n 'xbox_select': tools.BoxSelectTool,\n 'ybox_select': tools.BoxSelectTool,\n 'poly_select': tools.PolySelectTool,\n 'lasso_select': tools.LassoSelectTool,\n 'box_zoom': tools.BoxZoomTool,\n 'xbox_zoom': tools.BoxZoomTool,\n 'ybox_zoom': tools.BoxZoomTool,\n 'hover': tools.HoverTool,\n 'save': tools.SaveTool,\n 'undo': tools.UndoTool,\n 'redo': tools.RedoTool,\n 'reset': tools.ResetTool,\n 'help': tools.HelpTool,\n 'box_edit': tools.BoxEditTool,\n 'point_draw': tools.PointDrawTool,\n 'poly_draw': tools.PolyDrawTool,\n 'poly_edit': tools.PolyEditTool,\n 'freehand_draw': tools.FreehandDrawTool\n}\n\n\ndef convert_timestamp(timestamp):\n \"\"\"\n Converts bokehJS timestamp to datetime64.\n \"\"\"\n datetime = dt.datetime.utcfromtimestamp(timestamp/1000.)\n return np.datetime64(datetime.replace(tzinfo=None))\n\n\ndef decode_bytes(array):\n \"\"\"\n Decodes an array, list or tuple of bytestrings to avoid python 3\n bokeh serialization errors\n \"\"\"\n if (sys.version_info.major == 2 or not len(array) or\n (isinstance(array, np.ndarray) and array.dtype.kind != 'O')):\n return array\n decoded = [v.decode('utf-8') if isinstance(v, bytes) else v for v in array]\n if isinstance(array, np.ndarray):\n return np.asarray(decoded)\n elif isinstance(array, tuple):\n return tuple(decoded)\n return decoded\n\n\ndef layout_padding(plots, renderer):\n \"\"\"\n Pads Nones in a list of lists of plots with empty plots.\n \"\"\"\n widths, heights = defaultdict(int), defaultdict(int)\n for r, row in enumerate(plots):\n for c, p in enumerate(row):\n if p is not None:\n width, height = renderer.get_size(p)\n widths[c] = max(widths[c], width)\n heights[r] = max(heights[r], height)\n\n expanded_plots = []\n for r, row in 
enumerate(plots):\n expanded_plots.append([])\n for c, p in enumerate(row):\n if p is None:\n p = empty_plot(widths[c], heights[r])\n elif hasattr(p, 'plot_width') and p.plot_width == 0 and p.plot_height == 0:\n p.plot_width = widths[c]\n p.plot_height = heights[r]\n expanded_plots[r].append(p)\n return expanded_plots\n\n\ndef compute_plot_size(plot):\n \"\"\"\n Computes the size of bokeh models that make up a layout such as\n figures, rows, columns, widgetboxes and Plot.\n \"\"\"\n if isinstance(plot, (Div, ToolbarBox)):\n # Cannot compute size for Div or ToolbarBox\n return 0, 0\n elif isinstance(plot, (Row, Column, WidgetBox, Tabs)):\n if not plot.children: return 0, 0\n if isinstance(plot, Row) or (isinstance(plot, ToolbarBox) and plot.toolbar_location not in ['right', 'left']):\n w_agg, h_agg = (np.sum, np.max)\n elif isinstance(plot, Tabs):\n w_agg, h_agg = (np.max, np.max)\n else:\n w_agg, h_agg = (np.max, np.sum)\n widths, heights = zip(*[compute_plot_size(child) for child in plot.children])\n width, height = w_agg(widths), h_agg(heights)\n elif isinstance(plot, (Figure, Chart)):\n width, height = plot.plot_width, plot.plot_height\n elif isinstance(plot, (Plot, DataTable, Spacer)):\n width, height = plot.width, plot.height\n return width, height\n\n\ndef empty_plot(width, height):\n \"\"\"\n Creates an empty and invisible plot of the specified size.\n \"\"\"\n x_range = Range1d(start=0, end=1)\n y_range = Range1d(start=0, end=1)\n p = Figure(plot_width=width, plot_height=height,\n x_range=x_range, y_range=y_range)\n p.xaxis.visible = False\n p.yaxis.visible = False\n p.outline_line_alpha = 0\n p.grid.grid_line_alpha = 0\n return p\n\n\ndef font_size_to_pixels(size):\n \"\"\"\n Convert a fontsize to a pixel value\n \"\"\"\n if size is None or not isinstance(size, basestring):\n return\n conversions = {'em': 16, 'pt': 16/12.}\n val = re.findall('\\d+', size)\n unit = re.findall('[a-z]+', size)\n if (val and not unit) or (val and unit[0] == 'px'):\n return int(val[0])\n elif val and unit[0] in conversions:\n return (int(int(val[0]) * conversions[unit[0]]))\n\n\ndef make_axis(axis, size, factors, dim, flip=False, rotation=0,\n label_size=None, tick_size=None, axis_height=35):\n factors = list(map(dim.pprint_value, factors))\n nchars = np.max([len(f) for f in factors])\n ranges = FactorRange(factors=factors)\n ranges2 = Range1d(start=0, end=1)\n axis_label = dim_axis_label(dim)\n reset = \"range.setv({start: 0, end: range.factors.length})\"\n ranges.callback = CustomJS(args=dict(range=ranges), code=reset)\n\n axis_props = {}\n if label_size:\n axis_props['axis_label_text_font_size'] = value(label_size)\n if tick_size:\n axis_props['major_label_text_font_size'] = value(tick_size)\n\n tick_px = font_size_to_pixels(tick_size)\n if tick_px is None:\n tick_px = 8\n label_px = font_size_to_pixels(label_size)\n if label_px is None:\n label_px = 10\n\n rotation = np.radians(rotation)\n if axis == 'x':\n align = 'center'\n # Adjust height to compensate for label rotation\n height = int(axis_height + np.abs(np.sin(rotation)) *\n ((nchars*tick_px)*0.82)) + tick_px + label_px\n opts = dict(x_axis_type='auto', x_axis_label=axis_label,\n x_range=ranges, y_range=ranges2, plot_height=height,\n plot_width=size)\n else:\n # Adjust width to compensate for label rotation\n align = 'left' if flip else 'right'\n width = int(axis_height + np.abs(np.cos(rotation)) *\n ((nchars*tick_px)*0.82)) + tick_px + label_px\n opts = dict(y_axis_label=axis_label, x_range=ranges2,\n y_range=ranges, plot_width=width, 
plot_height=size)\n\n p = Figure(toolbar_location=None, tools=[], **opts)\n p.outline_line_alpha = 0\n p.grid.grid_line_alpha = 0\n\n if axis == 'x':\n p.yaxis.visible = False\n axis = p.xaxis[0]\n if flip:\n p.above = p.below\n p.below = []\n p.xaxis[:] = p.above\n else:\n p.xaxis.visible = False\n axis = p.yaxis[0]\n if flip:\n p.right = p.left\n p.left = []\n p.yaxis[:] = p.right\n axis.major_label_orientation = rotation\n axis.major_label_text_align = align\n axis.major_label_text_baseline = 'middle'\n axis.update(**axis_props)\n return p\n\n\ndef hsv_to_rgb(hsv):\n \"\"\"\n Vectorized HSV to RGB conversion, adapted from:\n http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion\n \"\"\"\n h, s, v = (hsv[..., i] for i in range(3))\n shape = h.shape\n i = np.int_(h*6.)\n f = h*6.-i\n\n q = f\n t = 1.-f\n i = np.ravel(i)\n f = np.ravel(f)\n i%=6\n\n t = np.ravel(t)\n q = np.ravel(q)\n s = np.ravel(s)\n v = np.ravel(v)\n\n clist = (1-s*np.vstack([np.zeros_like(f),np.ones_like(f),q,t]))*v\n\n #0:v 1:p 2:q 3:t\n order = np.array([[0,3,1],[2,0,1],[1,0,3],[1,2,0],[3,1,0],[0,1,2]])\n rgb = clist[order[i], np.arange(np.prod(shape))[:,None]]\n\n return rgb.reshape(shape+(3,))\n\n\ndef pad_width(model, table_padding=0.85, tabs_padding=1.2):\n \"\"\"\n Computes the width of a model and sets up appropriate padding\n for Tabs and DataTable types.\n \"\"\"\n if isinstance(model, Row):\n vals = [pad_width(child) for child in model.children]\n width = np.max([v for v in vals if v is not None])\n elif isinstance(model, Column):\n vals = [pad_width(child) for child in model.children]\n width = np.sum([v for v in vals if v is not None])\n elif isinstance(model, Tabs):\n vals = [pad_width(t) for t in model.tabs]\n width = np.max([v for v in vals if v is not None])\n for model in model.tabs:\n model.width = width\n width = int(tabs_padding*width)\n elif isinstance(model, DataTable):\n width = model.width\n model.width = int(table_padding*width)\n elif isinstance(model, (WidgetBox, Div)):\n width = model.width\n elif model:\n width = model.plot_width\n else:\n width = 0\n return width\n\n\ndef pad_plots(plots):\n \"\"\"\n Accepts a grid of bokeh plots in form of a list of lists and\n wraps any DataTable or Tabs in a WidgetBox with appropriate\n padding. 
Required to avoid overlap in gridplot.\n \"\"\"\n widths = []\n for row in plots:\n row_widths = []\n for p in row:\n width = pad_width(p)\n row_widths.append(width)\n widths.append(row_widths)\n plots = [[WidgetBox(p, width=w) if isinstance(p, (DataTable, Tabs)) else p\n for p, w in zip(row, ws)] for row, ws in zip(plots, widths)]\n return plots\n\n\ndef filter_toolboxes(plots):\n \"\"\"\n Filters out toolboxes out of a list of plots to be able to compose\n them into a larger plot.\n \"\"\"\n if isinstance(plots, list):\n plots = [filter_toolboxes(plot) for plot in plots]\n elif hasattr(plots, 'children'):\n plots.children = [filter_toolboxes(child) for child in plots.children\n if not isinstance(child, ToolbarBox)]\n return plots\n\n\ndef py2js_tickformatter(formatter, msg=''):\n \"\"\"\n Uses flexx.pyscript to compile a python tick formatter to JS code\n \"\"\"\n try:\n from flexx.pyscript import py2js\n except ImportError:\n param.main.param.warning(\n msg+'Ensure Flexx is installed (\"conda install -c bokeh flexx\" '\n 'or \"pip install flexx\")')\n return\n try:\n jscode = py2js(formatter, 'formatter')\n except Exception as e:\n error = 'Pyscript raised an error: {0}'.format(e)\n error = error.replace('%', '%%')\n param.main.param.warning(msg+error)\n return\n\n args = _getargspec(formatter).args\n arg_define = 'var %s = tick;' % args[0] if args else ''\n return_js = 'return formatter();\\n'\n jsfunc = '\\n'.join([arg_define, jscode, return_js])\n match = re.search('(formatter \\= function \\(.*\\))', jsfunc )\n return jsfunc[:match.start()] + 'formatter = function ()' + jsfunc[match.end():]\n\n\ndef get_tab_title(key, frame, overlay):\n \"\"\"\n Computes a title for bokeh tabs from the key in the overlay, the\n element and the containing (Nd)Overlay.\n \"\"\"\n if isinstance(overlay, Overlay):\n if frame is not None:\n title = []\n if frame.label:\n title.append(frame.label)\n if frame.group != frame.params('group').default:\n title.append(frame.group)\n else:\n title.append(frame.group)\n else:\n title = key\n title = ' '.join(title)\n else:\n title = ' | '.join([d.pprint_value_string(k) for d, k in\n zip(overlay.kdims, key)])\n return title\n\n\n\ndef filter_batched_data(data, mapping):\n \"\"\"\n Iterates over the data and mapping for a ColumnDataSource and\n replaces columns with repeating values with a scalar. This is\n purely and optimization for scalar types.\n \"\"\"\n for k, v in list(mapping.items()):\n if isinstance(v, dict) and 'field' in v:\n if 'transform' in v:\n continue\n v = v['field']\n elif not isinstance(v, basestring):\n continue\n values = data[v]\n try:\n if len(unique_array(values)) == 1:\n mapping[k] = values[0]\n del data[v]\n except:\n pass\n\ndef cds_column_replace(source, data):\n \"\"\"\n Determine if the CDS.data requires a full replacement or simply\n needs to be updated. 
A replacement is required if untouched\n columns are not the same length as the columns being updated.\n \"\"\"\n current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))]\n new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))]\n untouched = [k for k in source.data if k not in data]\n return bool(untouched and current_length and new_length and current_length[0] != new_length[0])\n\n\n@contextmanager\ndef hold_policy(document, policy, server=False):\n \"\"\"\n Context manager to temporary override the hold policy.\n \"\"\"\n old_policy = document._hold\n document._hold = policy\n try:\n yield\n finally:\n if server and not old_policy:\n document.unhold()\n else:\n document._hold = old_policy\n\n\ndef recursive_model_update(model, props):\n \"\"\"\n Recursively updates attributes on a model including other\n models. If the type of the new model matches the old model\n properties are simply updated, otherwise the model is replaced.\n \"\"\"\n updates = {}\n valid_properties = model.properties_with_values()\n for k, v in props.items():\n if isinstance(v, Model):\n nested_model = getattr(model, k)\n if type(v) is type(nested_model):\n nested_props = v.properties_with_values(include_defaults=False)\n recursive_model_update(nested_model, nested_props)\n else:\n setattr(model, k, v)\n elif k in valid_properties and v != valid_properties[k]:\n updates[k] = v\n model.update(**updates)\n\n\ndef update_shared_sources(f):\n \"\"\"\n Context manager to ensures data sources shared between multiple\n plots are cleared and updated appropriately avoiding warnings and\n allowing empty frames on subplots. Expects a list of\n shared_sources and a mapping of the columns expected columns for\n each source in the plots handles.\n \"\"\"\n def wrapper(self, *args, **kwargs):\n source_cols = self.handles.get('source_cols', {})\n shared_sources = self.handles.get('shared_sources', [])\n for source in shared_sources:\n source.data.clear()\n if self.document and self.document._held_events:\n self.document._held_events = self.document._held_events[:-1]\n\n ret = f(self, *args, **kwargs)\n\n for source in shared_sources:\n expected = source_cols[id(source)]\n found = [c for c in expected if c in source.data]\n empty = np.full_like(source.data[found[0]], np.NaN) if found else []\n patch = {c: empty for c in expected if c not in source.data}\n source.data.update(patch)\n return ret\n return wrapper\n\n\ndef categorize_array(array, dim):\n \"\"\"\n Uses a Dimension instance to convert an array of values to categorical\n (i.e. 
string) values and applies escaping for colons, which bokeh\n treats as a categorical suffix.\n \"\"\"\n return np.array([dim.pprint_value(x) for x in array])\n\n\nclass periodic(object):\n \"\"\"\n Mocks the API of periodic Thread in hv.core.util, allowing a smooth\n API transition on bokeh server.\n \"\"\"\n\n def __init__(self, document):\n self.document = document\n self.callback = None\n self.period = None\n self.count = None\n self.counter = None\n self._start_time = None\n self.timeout = None\n\n @property\n def completed(self):\n return self.counter is None\n\n def start(self):\n self._start_time = time.time()\n if self.document is None:\n raise RuntimeError('periodic was registered to be run on bokeh'\n 'server but no document was found.')\n self.document.add_periodic_callback(self._periodic_callback, self.period)\n\n def __call__(self, period, count, callback, timeout=None, block=False):\n if isinstance(count, int):\n if count < 0: raise ValueError('Count value must be positive')\n elif not type(count) is type(None):\n raise ValueError('Count value must be a positive integer or None')\n\n self.callback = callback\n self.period = period*1000.\n self.timeout = timeout\n self.count = count\n self.counter = 0\n return self\n\n def _periodic_callback(self):\n self.callback(self.counter)\n self.counter += 1\n\n if self.timeout is not None:\n dt = (time.time() - self._start_time)\n if dt > self.timeout:\n self.stop()\n if self.counter == self.count:\n self.stop()\n\n def stop(self):\n self.counter = None\n self.timeout = None\n try:\n self.document.remove_periodic_callback(self._periodic_callback)\n except ValueError: # Already stopped\n pass\n\n def __repr__(self):\n return 'periodic(%s, %s, %s)' % (self.period,\n self.count,\n callable_name(self.callback))\n def __str__(self):\n return repr(self)\n\n\ndef attach_periodic(plot):\n \"\"\"\n Attaches plot refresh to all streams on the object.\n \"\"\"\n def append_refresh(dmap):\n for dmap in get_nested_dmaps(dmap):\n dmap.periodic._periodic_util = periodic(plot.document)\n return plot.hmap.traverse(append_refresh, [DynamicMap])\n\n\ndef date_to_integer(date):\n \"\"\"Converts support date types to milliseconds since epoch\n\n Attempts highest precision conversion of different datetime\n formats to milliseconds since the epoch (1970-01-01 00:00:00).\n If datetime is a cftime with a non-standard calendar the\n caveats described in hv.core.util.cftime_to_timestamp apply.\n\n Args:\n date: Date- or datetime-like object\n\n Returns:\n Milliseconds since 1970-01-01 00:00:00\n \"\"\"\n if pd and isinstance(date, pd.Timestamp):\n try:\n date = date.to_datetime64()\n except:\n date = date.to_datetime()\n\n if isinstance(date, np.datetime64):\n return date.astype('datetime64[ms]').astype(float)\n elif isinstance(date, cftime_types):\n return cftime_to_timestamp(date, 'ms')\n\n if hasattr(date, 'timetuple'):\n dt_int = calendar.timegm(date.timetuple())*1000\n else:\n raise ValueError('Datetime type not recognized')\n return dt_int\n\n\ndef glyph_order(keys, draw_order=[]):\n \"\"\"\n Orders a set of glyph handles using regular sort and an explicit\n sort order. The explicit draw order must take the form of a list\n of glyph names while the keys should be glyph names with a custom\n suffix. 
The draw order may only match subset of the keys and any\n matched items will take precedence over other entries.\n \"\"\"\n keys = sorted(keys)\n def order_fn(glyph):\n matches = [item for item in draw_order if glyph.startswith(item)]\n return ((draw_order.index(matches[0]), glyph) if matches else\n (1e9+keys.index(glyph), glyph))\n return sorted(keys, key=order_fn)\n\n\ndef colormesh(X, Y):\n \"\"\"\n Generates line paths for a quadmesh given 2D arrays of X and Y\n coordinates.\n \"\"\"\n X1 = X[0:-1, 0:-1].ravel()\n Y1 = Y[0:-1, 0:-1].ravel()\n X2 = X[1:, 0:-1].ravel()\n Y2 = Y[1:, 0:-1].ravel()\n X3 = X[1:, 1:].ravel()\n Y3 = Y[1:, 1:].ravel()\n X4 = X[0:-1, 1:].ravel()\n Y4 = Y[0:-1, 1:].ravel()\n\n X = np.column_stack([X1, X2, X3, X4, X1])\n Y = np.column_stack([Y1, Y2, Y3, Y4, Y1])\n return X, Y\n\n\ndef theme_attr_json(theme, attr):\n if isinstance(theme, str) and theme in built_in_themes:\n return built_in_themes[theme]._json['attrs'].get(attr, {})\n elif isinstance(theme, Theme):\n return theme._json['attrs'].get(attr, {})\n else:\n return {}\n\n\ndef multi_polygons_data(element):\n \"\"\"\n Expands polygon data which contains holes to a bokeh multi_polygons\n representation. Multi-polygons split by nans are expanded and the\n correct list of holes is assigned to each sub-polygon.\n \"\"\"\n paths = element.split(datatype='array', dimensions=element.kdims)\n xs, ys = ([path[:, idx] for path in paths] for idx in (0, 1))\n holes = element.holes()\n xsh, ysh = [], []\n for x, y, multi_hole in zip(xs, ys, holes):\n xhs = [[h[:, 0] for h in hole] for hole in multi_hole]\n yhs = [[h[:, 1] for h in hole] for hole in multi_hole]\n array = np.column_stack([x, y])\n splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]\n arrays = np.split(array, splits+1) if len(splits) else [array]\n multi_xs, multi_ys = [], []\n for i, (path, hx, hy) in enumerate(zip(arrays, xhs, yhs)):\n if i != (len(arrays)-1):\n path = path[:-1]\n multi_xs.append([path[:, 0]]+hx)\n multi_ys.append([path[:, 1]]+hy)\n xsh.append(multi_xs)\n ysh.append(multi_ys)\n return xsh, ysh\n\n\ndef match_dim_specs(specs1, specs2):\n \"\"\"Matches dimension specs used to link axes.\n\n Axis dimension specs consists of a list of tuples corresponding\n to each dimension, each tuple spec has the form (name, label, unit).\n The name and label must match exactly while the unit only has to\n match if both specs define one.\n \"\"\"\n if (specs1 is None or specs2 is None) or (len(specs1) != len(specs2)):\n return False\n for spec1, spec2 in zip(specs1, specs2):\n for s1, s2 in zip(spec1, spec2):\n if s1 is None or s2 is None:\n continue\n if s1 != s2:\n return False\n return True\n", "path": "holoviews/plotting/bokeh/util.py" } ]
diff --git a/holoviews/plotting/bokeh/util.py b/holoviews/plotting/bokeh/util.py index 4edb0e259c..36cd7f1795 100644 --- a/holoviews/plotting/bokeh/util.py +++ b/holoviews/plotting/bokeh/util.py @@ -86,7 +86,7 @@ def convert_timestamp(timestamp): """ Converts bokehJS timestamp to datetime64. """ - datetime = dt.datetime.fromtimestamp(timestamp/1000., dt.timezone.utc) + datetime = dt.datetime.utcfromtimestamp(timestamp/1000.) return np.datetime64(datetime.replace(tzinfo=None))
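The diff above swaps `dt.datetime.fromtimestamp(timestamp/1000., dt.timezone.utc)` for `dt.datetime.utcfromtimestamp(timestamp/1000.)`. As a quick sanity check (this snippet is not part of the PR), both forms yield the same naive UTC datetime once the timezone info is stripped, so the conversion to `np.datetime64` is unchanged:

```python
# Standalone check (not from the PR): both conversions yield the same naive UTC value
# for a bokehJS-style millisecond timestamp, so np.datetime64() sees identical input.
import datetime as dt
import numpy as np

timestamp = 1_500_000_000_000  # example value: milliseconds since the epoch

aware = dt.datetime.fromtimestamp(timestamp / 1000., dt.timezone.utc)  # tz-aware UTC
naive = dt.datetime.utcfromtimestamp(timestamp / 1000.)                # naive UTC

assert aware.replace(tzinfo=None) == naive
print(np.datetime64(naive))  # 2017-07-14T02:40:00.000000
```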
carpentries__amy-690
No reverse match for rest_framework namespace

For a very strange reason the error shows up when accessing these URLs: https://github.com/swcarpentry/amy/blob/develop/api/urls.py#L57

I wasn't able to get rid of it; the `rest_framework` namespace isn't used anywhere, so maybe it should be removed…?
[ { "content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name = 'api'\n\n# routers generate URLs for methods like `.list` or `.retrieve`\nrouter = routers.SimpleRouter()\nrouter.register('reports', views.ReportsViewSet, base_name='reports')\nrouter.register('persons', views.PersonViewSet)\nawards_router = routers.NestedSimpleRouter(router, 'persons', lookup='person')\nawards_router.register('awards', views.AwardViewSet, base_name='person-awards')\nperson_task_router = routers.NestedSimpleRouter(router, 'persons',\n lookup='person')\nperson_task_router.register('tasks', views.PersonTaskViewSet,\n base_name='person-tasks')\nrouter.register('events', views.EventViewSet)\ntasks_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntasks_router.register('tasks', views.TaskViewSet, base_name='event-tasks')\ntodos_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntodos_router.register('todos', views.TodoViewSet, base_name='event-todos')\nrouter.register('hosts', views.HostViewSet)\nrouter.register('airports', views.AirportViewSet)\n\nurlpatterns = [\n url('^$', views.ApiRoot.as_view(), name='root'),\n # TODO: turn these export views into ViewSets and add them to the router\n url('^export/badges/$',\n views.ExportBadgesView.as_view(),\n name='export-badges'),\n url('^export/instructors/$',\n views.ExportInstructorLocationsView.as_view(),\n name='export-instructors'),\n url('^export/members/$',\n views.ExportMembersView.as_view(),\n name='export-members'),\n url('^events/published/$',\n views.PublishedEvents.as_view(),\n name='events-published'),\n url('^todos/user/$',\n views.UserTodoItems.as_view(),\n name='user-todos'),\n\n url('^', include(router.urls)),\n url('^', include(awards_router.urls)),\n url('^', include(person_task_router.urls)),\n url('^', include(tasks_router.urls)),\n url('^', include(todos_router.urls)),\n]\n\n# for login-logout functionality\nurlpatterns += [\n url(r'^api-auth/',\n include('rest_framework.urls', namespace='rest_framework')),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format\n", "path": "api/urls.py" } ]
[ { "content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name = 'api'\n\n# routers generate URLs for methods like `.list` or `.retrieve`\nrouter = routers.SimpleRouter()\nrouter.register('reports', views.ReportsViewSet, base_name='reports')\nrouter.register('persons', views.PersonViewSet)\nawards_router = routers.NestedSimpleRouter(router, 'persons', lookup='person')\nawards_router.register('awards', views.AwardViewSet, base_name='person-awards')\nperson_task_router = routers.NestedSimpleRouter(router, 'persons',\n lookup='person')\nperson_task_router.register('tasks', views.PersonTaskViewSet,\n base_name='person-tasks')\nrouter.register('events', views.EventViewSet)\ntasks_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntasks_router.register('tasks', views.TaskViewSet, base_name='event-tasks')\ntodos_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntodos_router.register('todos', views.TodoViewSet, base_name='event-todos')\nrouter.register('hosts', views.HostViewSet)\nrouter.register('airports', views.AirportViewSet)\n\nurlpatterns = [\n url('^$', views.ApiRoot.as_view(), name='root'),\n # TODO: turn these export views into ViewSets and add them to the router\n url('^export/badges/$',\n views.ExportBadgesView.as_view(),\n name='export-badges'),\n url('^export/instructors/$',\n views.ExportInstructorLocationsView.as_view(),\n name='export-instructors'),\n url('^export/members/$',\n views.ExportMembersView.as_view(),\n name='export-members'),\n url('^events/published/$',\n views.PublishedEvents.as_view(),\n name='events-published'),\n url('^todos/user/$',\n views.UserTodoItems.as_view(),\n name='user-todos'),\n\n url('^', include(router.urls)),\n url('^', include(awards_router.urls)),\n url('^', include(person_task_router.urls)),\n url('^', include(tasks_router.urls)),\n url('^', include(todos_router.urls)),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format\n", "path": "api/urls.py" } ]
diff --git a/api/urls.py b/api/urls.py index 4eecbc2b7..ad8aa1df3 100644 --- a/api/urls.py +++ b/api/urls.py @@ -52,10 +52,4 @@ url('^', include(todos_router.urls)), ] -# for login-logout functionality -urlpatterns += [ - url(r'^api-auth/', - include('rest_framework.urls', namespace='rest_framework')), -] - urlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format
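The report above points at the `api-auth/` include, and the accompanying diff simply drops it. If the browsable-API login/logout views were still wanted, a hypothetical alternative (an assumption on my part, not what the PR does) would be to move the include to the project-level `urls.py`, so it is not nested inside the namespaced `api` application:

```python
# Hypothetical project-level urls.py sketch (assumption; the actual PR removes the
# api-auth block instead). The DRF login/logout URLs are kept out of the 'api'
# application namespace by including them at the project level.
from django.conf.urls import include, url

urlpatterns = [
    url(r'^api/', include('api.urls')),  # the namespaced API app shown above
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
```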
hpcaitech__ColossalAI-5442
[tensor] fix some unittests
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nContinual Pre-training/Supervised fine-tuning of Colossal-LLaMA-2 developed by Colossal-AI Team\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport resource\nfrom contextlib import nullcontext\n\nimport torch\nimport torch.distributed as dist\nfrom colossal_llama2.dataset.loader import (\n DataCollatorForSupervisedDataset,\n StatefulDistributedSampler,\n load_tokenized_dataset,\n)\nfrom colossal_llama2.utils.ckpt_io import load_checkpoint, save_checkpoint\nfrom colossal_llama2.utils.flash_attention_patch import replace_with_flash_attention\nfrom colossal_llama2.utils.froze import freeze_non_embeds_parameters\nfrom colossal_llama2.utils.neftune_patch import activate_neftune, deactivate_neftune\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom transformers import LlamaForCausalLM, LlamaTokenizer\n\nimport colossalai\nfrom colossalai.accelerator import get_accelerator\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n\ndef get_model_numel(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters())\n\n\ndef format_numel_str(numel: int) -> str:\n B = 1024**3\n M = 1024**2\n K = 1024\n if numel >= B:\n return f\"{numel / B:.2f} B\"\n elif numel >= M:\n return f\"{numel / M:.2f} M\"\n elif numel >= K:\n return f\"{numel / K:.2f} K\"\n else:\n return f\"{numel}\"\n\n\ndef all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:\n dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM)\n tensor.div_(dist.get_world_size())\n return tensor\n\n\ndef main() -> None:\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--pretrained\",\n type=str,\n default=None,\n help=\"Address of the pre-trained modeling\",\n )\n parser.add_argument(\"--dataset\", nargs=\"+\", default=[])\n parser.add_argument(\n \"--plugin\",\n type=str,\n default=\"gemini\",\n choices=[\"gemini\", \"gemini_auto\", \"zero2\", \"zero2_cpu\", \"3d\"],\n help=\"Choose which plugin to use\",\n )\n parser.add_argument(\"--load_checkpoint\", type=str, default=None, help=\"Load checkpoint\")\n parser.add_argument(\"--save_interval\", type=int, default=1000, help=\"Save interval\")\n parser.add_argument(\"--save_dir\", type=str, default=\"checkpoint_dir\", help=\"Checkpoint directory\")\n parser.add_argument(\"--tensorboard_dir\", type=str, default=\"logs_dir\", help=\"Tensorboard directory\")\n parser.add_argument(\"--config_file\", type=str, default=\"config_file\", help=\"Config file\")\n parser.add_argument(\"--num_epochs\", type=int, default=1, help=\"Number of training epochs\")\n parser.add_argument(\"--accumulation_steps\", type=int, default=1, help=\"Number of accumulation steps\")\n parser.add_argument(\"--micro_batch_size\", type=int, default=2, help=\"Batch size of each process\")\n parser.add_argument(\"--lr\", type=float, default=3e-4, help=\"Learning rate\")\n parser.add_argument(\"--max_length\", type=int, default=4096, help=\"Model max length\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n default=\"fp16\",\n choices=[\"fp16\", \"bf16\"],\n 
help=\"Mixed precision\",\n )\n parser.add_argument(\"--grad_clip\", type=float, default=1.0, help=\"Gradient clipping value\")\n parser.add_argument(\"--weight_decay\", type=float, default=0.1, help=\"Weight decay\")\n parser.add_argument(\"--warmup_steps\", type=int, default=None, help=\"Warmup steps\")\n parser.add_argument(\n \"--use_grad_checkpoint\",\n action=\"store_true\",\n default=False,\n help=\"Use gradient checkpointing\",\n )\n parser.add_argument(\n \"--use_flash_attn\",\n action=\"store_true\",\n default=False,\n help=\"Use flash-attention\",\n )\n parser.add_argument(\n \"--use_neft\",\n action=\"store_true\",\n default=False,\n help=\"Use NEFTune\",\n )\n parser.add_argument(\n \"--freeze_non_embeds_params\",\n action=\"store_true\",\n default=False,\n help=\"Freeze non embeddings parameters\",\n )\n parser.add_argument(\"--tp\", type=int, default=1)\n parser.add_argument(\"--zero\", type=int, default=1)\n parser.add_argument(\"--pad_token\", choices=[\"eos\", \"unk\"], default=\"eos\")\n parser.add_argument(\"--padding_mode\", choices=[\"max_length\", \"longest\"], default=\"max_length\")\n args = parser.parse_args()\n\n with open(args.config_file, \"w\") as f:\n json.dump(args.__dict__, f, indent=4)\n\n # ==============================\n # Initialize Distributed Training\n # ==============================\n colossalai.launch_from_torch({})\n accelerator = get_accelerator()\n coordinator = DistCoordinator()\n\n # ==============================\n # Initialize Tensorboard\n # ==============================\n if coordinator.is_master():\n os.makedirs(args.tensorboard_dir, exist_ok=True)\n writer = SummaryWriter(args.tensorboard_dir)\n\n # ==============================\n # Initialize Booster\n # ==============================\n if args.plugin == \"gemini\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision,\n initial_scale=2**16,\n max_norm=args.grad_clip,\n enable_gradient_accumulation=(args.accumulation_steps > 1),\n )\n elif args.plugin == \"gemini_auto\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision,\n placement_policy=\"auto\",\n initial_scale=2**16,\n max_norm=args.grad_clip,\n enable_gradient_accumulation=(args.accumulation_steps > 1),\n )\n elif args.plugin == \"zero2\":\n plugin = LowLevelZeroPlugin(\n stage=2,\n precision=args.mixed_precision,\n initial_scale=2**16,\n max_norm=args.grad_clip,\n )\n elif args.plugin == \"zero2_cpu\":\n plugin = LowLevelZeroPlugin(\n stage=2,\n precision=args.mixed_precision,\n initial_scale=2**16,\n cpu_offload=True,\n max_norm=args.grad_clip,\n )\n elif args.plugin == \"3d\":\n plugin = HybridParallelPlugin(\n tp_size=args.tp,\n pp_size=1,\n zero_stage=args.zero,\n max_norm=args.grad_clip,\n precision=args.mixed_precision,\n )\n else:\n raise ValueError(f\"Unknown plugin {args.plugin}\")\n\n booster = Booster(plugin=plugin)\n\n # ======================================================\n # Initialize Tokenizer, Dataset, Collator and Dataloader\n # ======================================================\n tokenizer = LlamaTokenizer.from_pretrained(args.pretrained)\n if args.pad_token == \"eos\":\n tokenizer.pad_token = tokenizer.eos_token\n elif args.pad_token == \"unk\":\n tokenizer.pad_token = tokenizer.unk_token\n tokenizer.add_bos_token = False\n tokenizer.add_eos_token = False\n\n coordinator.print_on_master(f\"Configuration file will be saved at: {args.config_file}\")\n coordinator.print_on_master(f\"Tensorboard logs will be saved at: {args.tensorboard_dir}\")\n coordinator.print_on_master(f\"Model 
checkpoint will be saved at: {args.save_dir}\")\n\n coordinator.print_on_master(f\"Load dataset: {args.dataset}\")\n\n dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode=\"train\")\n data_collator = DataCollatorForSupervisedDataset(\n tokenizer=tokenizer, max_length=args.max_length, padding=args.padding_mode\n )\n dataloader = plugin.prepare_dataloader(\n dataset=dataset,\n batch_size=args.micro_batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=data_collator,\n distributed_sampler_cls=StatefulDistributedSampler,\n )\n coordinator.print_on_master(\n f\"Max device memory after data loader: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n\n # ======================================================\n # Initialize Model, Objective, Optimizer and LR Scheduler\n # ======================================================\n init_ctx = (\n LazyInitContext(default_device=get_current_device())\n if isinstance(plugin, (GeminiPlugin, HybridParallelPlugin))\n else nullcontext()\n )\n with init_ctx:\n model = LlamaForCausalLM.from_pretrained(args.pretrained)\n # Freeze part of parameters.\n if args.freeze_non_embeds_params:\n freeze_non_embeds_parameters(model=model)\n # this is essential, otherwise the grad checkpoint will not work.\n model.train()\n\n if args.use_grad_checkpoint:\n model.gradient_checkpointing_enable()\n coordinator.print_on_master(msg=\"Gradient checkpointing enabled successfully\")\n if args.use_flash_attn:\n replace_with_flash_attention(model=model)\n coordinator.print_on_master(msg=\"Flash-attention enabled successfully\")\n\n model_numel = get_model_numel(model)\n coordinator.print_on_master(f\"Model params: {format_numel_str(model_numel)}\")\n\n optimizer = HybridAdam(\n model_params=filter(lambda p: p.requires_grad, model.parameters())\n if args.freeze_non_embeds_params\n else model.parameters(),\n lr=args.lr,\n betas=(0.9, 0.95),\n weight_decay=args.weight_decay,\n adamw_mode=True,\n )\n\n if args.warmup_steps is None:\n args.warmup_steps = int(args.num_epochs * 0.025 * (len(dataloader) // args.accumulation_steps))\n coordinator.print_on_master(f\"Warmup steps is set to {args.warmup_steps}\")\n\n lr_scheduler = CosineAnnealingWarmupLR(\n optimizer=optimizer,\n total_steps=args.num_epochs * (len(dataloader) // args.accumulation_steps),\n warmup_steps=args.warmup_steps,\n eta_min=0.1 * args.lr,\n )\n\n # Flash attention will be disabled because it does NOT support fp32.\n default_dtype = torch.float16 if args.mixed_precision == \"fp16\" else torch.bfloat16\n torch.set_default_dtype(default_dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n dataloader=dataloader,\n )\n\n torch.set_default_dtype(torch.float)\n\n coordinator.print_on_master(\n f\"Booster init max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB\"\n )\n\n start_epoch = 0\n start_step = 0\n sampler_start_idx = 0\n if args.load_checkpoint is not None:\n if \"modeling\" in args.load_checkpoint:\n coordinator.print_on_master(f\"Continued pretrain from checkpoint {args.load_checkpoint}\")\n booster.load_model(model, args.load_checkpoint)\n else:\n coordinator.print_on_master(f\"Load model checkpoint from {args.load_checkpoint}\")\n start_epoch, start_step, sampler_start_idx = load_checkpoint(\n load_dir=args.load_checkpoint,\n 
booster=booster,\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n )\n coordinator.print_on_master(\n f\"Loaded checkpoint {args.load_checkpoint} at epoch {start_epoch} step {start_step}\"\n )\n coordinator.print_on_master(f\"Loaded sample at index {sampler_start_idx}\")\n\n coordinator.print_on_master(\n f\"Checkpoint loaded max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Checkpoint loaded device memory: {accelerator.memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Checkpoint loaded max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB\"\n )\n\n if args.use_neft:\n coordinator.print_on_master(\"Activate NEFTune.\")\n model, handle = activate_neftune(model)\n\n num_steps_per_epoch = len(dataloader) // args.accumulation_steps\n # If resume training, set the sampler start index to the correct value\n assert isinstance(dataloader.sampler, StatefulDistributedSampler)\n dataloader.sampler.set_start_index(start_index=sampler_start_idx)\n\n for epoch in range(start_epoch, args.num_epochs):\n dataloader.sampler.set_epoch(epoch=epoch)\n pbar = tqdm(\n desc=f\"Epoch {epoch}\",\n disable=not coordinator.is_master(),\n total=num_steps_per_epoch,\n initial=start_step // args.accumulation_steps,\n )\n total_loss = torch.tensor(0.0, device=get_current_device())\n for step, batch in enumerate(dataloader, start=start_step):\n batch = {k: v.to(get_current_device()) for k, v in batch.items() if isinstance(v, torch.Tensor)}\n\n batch_output = model(**batch)\n\n loss = batch_output.loss / args.accumulation_steps\n total_loss.add_(loss.data)\n\n booster.backward(loss=loss, optimizer=optimizer)\n\n if (step + 1) % args.accumulation_steps == 0:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n all_reduce_mean(tensor=total_loss)\n pbar.set_postfix({\"Loss\": f\"{total_loss.item():.4f}\"})\n if coordinator.is_master():\n global_step = (epoch * num_steps_per_epoch) + (step + 1) // args.accumulation_steps\n writer.add_scalar(tag=\"Loss\", scalar_value=total_loss.item(), global_step=global_step)\n writer.add_scalar(\n tag=\"Learning Rate\",\n scalar_value=lr_scheduler.get_last_lr()[0],\n global_step=global_step,\n )\n total_loss.fill_(0.0)\n pbar.update()\n # Save modeling.\n\n if (args.save_interval > 0 and (step + 1) % (args.save_interval * args.accumulation_steps) == 0) or (\n step + 1\n ) == len(dataloader):\n coordinator.print_on_master(\"\\nStart saving model checkpoint with running states\")\n\n if args.use_neft:\n coordinator.print_on_master(\"Deactivate NEFTune before saving model.\")\n deactivate_neftune(model, handle)\n\n accelerator.empty_cache()\n save_checkpoint(\n save_dir=args.save_dir,\n booster=booster,\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n epoch=epoch,\n step=step + 1,\n batch_size=args.micro_batch_size,\n coordinator=coordinator,\n )\n coordinator.print_on_master(\n f\"Saved checkpoint at epoch {epoch} step {step + 1} at folder {args.save_dir}\"\n )\n\n if args.use_neft:\n coordinator.print_on_master(\"Activate NEFTune.\")\n model, handle = activate_neftune(model)\n\n # Delete cache.\n # del batch, batch_labels, batch_output, loss\n accelerator.empty_cache()\n\n # the continue epochs are not resumed, so we need to reset the sampler start index and start step\n dataloader.sampler.set_start_index(start_index=0)\n start_step = 0\n\n if args.use_neft:\n coordinator.print_on_master(\"Deactivate NEFTune.\")\n 
deactivate_neftune(model, handle)\n\n # Final save.\n coordinator.print_on_master(\"Start saving final model checkpoint\")\n booster.save_model(model, os.path.join(args.save_dir, \"modeling\"), shard=True)\n coordinator.print_on_master(f\"Saved final model checkpoint at epoch {epoch} at folder {args.save_dir}\")\n\n coordinator.print_on_master(f\"Max device memory usage: {accelerator.max_memory_allocated()/1024**2:.2f} MB\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/train.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nContinual Pre-training/Supervised fine-tuning of Colossal-LLaMA-2 developed by Colossal-AI Team\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport resource\nfrom contextlib import nullcontext\n\nimport torch\nimport torch.distributed as dist\nfrom colossal_llama2.dataset.loader import (\n DataCollatorForSupervisedDataset,\n StatefulDistributedSampler,\n load_tokenized_dataset,\n)\nfrom colossal_llama2.utils.ckpt_io import load_checkpoint, save_checkpoint\nfrom colossal_llama2.utils.flash_attention_patch import replace_with_flash_attention\nfrom colossal_llama2.utils.froze import freeze_non_embeds_parameters\nfrom colossal_llama2.utils.neftune_patch import activate_neftune, deactivate_neftune\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom transformers import LlamaForCausalLM, LlamaTokenizer\n\nimport colossalai\nfrom colossalai.accelerator import get_accelerator\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n\ndef get_model_numel(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters())\n\n\ndef format_numel_str(numel: int) -> str:\n B = 1024**3\n M = 1024**2\n K = 1024\n if numel >= B:\n return f\"{numel / B:.2f} B\"\n elif numel >= M:\n return f\"{numel / M:.2f} M\"\n elif numel >= K:\n return f\"{numel / K:.2f} K\"\n else:\n return f\"{numel}\"\n\n\ndef all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:\n dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM)\n tensor = tensor.data\n tensor.div_(dist.get_world_size())\n return tensor\n\n\ndef main() -> None:\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--pretrained\",\n type=str,\n default=None,\n help=\"Address of the pre-trained modeling\",\n )\n parser.add_argument(\"--dataset\", nargs=\"+\", default=[])\n parser.add_argument(\n \"--plugin\",\n type=str,\n default=\"gemini\",\n choices=[\"gemini\", \"gemini_auto\", \"zero2\", \"zero2_cpu\", \"3d\"],\n help=\"Choose which plugin to use\",\n )\n parser.add_argument(\"--load_checkpoint\", type=str, default=None, help=\"Load checkpoint\")\n parser.add_argument(\"--save_interval\", type=int, default=1000, help=\"Save interval\")\n parser.add_argument(\"--save_dir\", type=str, default=\"checkpoint_dir\", help=\"Checkpoint directory\")\n parser.add_argument(\"--tensorboard_dir\", type=str, default=\"logs_dir\", help=\"Tensorboard directory\")\n parser.add_argument(\"--config_file\", type=str, default=\"config_file\", help=\"Config file\")\n parser.add_argument(\"--num_epochs\", type=int, default=1, help=\"Number of training epochs\")\n parser.add_argument(\"--accumulation_steps\", type=int, default=1, help=\"Number of accumulation steps\")\n parser.add_argument(\"--micro_batch_size\", type=int, default=2, help=\"Batch size of each process\")\n parser.add_argument(\"--lr\", type=float, default=3e-4, help=\"Learning rate\")\n parser.add_argument(\"--max_length\", type=int, default=4096, help=\"Model max length\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n default=\"fp16\",\n 
choices=[\"fp16\", \"bf16\"],\n help=\"Mixed precision\",\n )\n parser.add_argument(\"--grad_clip\", type=float, default=1.0, help=\"Gradient clipping value\")\n parser.add_argument(\"--weight_decay\", type=float, default=0.1, help=\"Weight decay\")\n parser.add_argument(\"--warmup_steps\", type=int, default=None, help=\"Warmup steps\")\n parser.add_argument(\n \"--use_grad_checkpoint\",\n action=\"store_true\",\n default=False,\n help=\"Use gradient checkpointing\",\n )\n parser.add_argument(\n \"--use_flash_attn\",\n action=\"store_true\",\n default=False,\n help=\"Use flash-attention\",\n )\n parser.add_argument(\n \"--use_neft\",\n action=\"store_true\",\n default=False,\n help=\"Use NEFTune\",\n )\n parser.add_argument(\n \"--freeze_non_embeds_params\",\n action=\"store_true\",\n default=False,\n help=\"Freeze non embeddings parameters\",\n )\n parser.add_argument(\"--tp\", type=int, default=1)\n parser.add_argument(\"--zero\", type=int, default=1)\n parser.add_argument(\"--pad_token\", choices=[\"eos\", \"unk\"], default=\"eos\")\n parser.add_argument(\"--padding_mode\", choices=[\"max_length\", \"longest\"], default=\"max_length\")\n args = parser.parse_args()\n\n with open(args.config_file, \"w\") as f:\n json.dump(args.__dict__, f, indent=4)\n\n # ==============================\n # Initialize Distributed Training\n # ==============================\n colossalai.launch_from_torch({})\n accelerator = get_accelerator()\n coordinator = DistCoordinator()\n\n # ==============================\n # Initialize Tensorboard\n # ==============================\n if coordinator.is_master():\n os.makedirs(args.tensorboard_dir, exist_ok=True)\n writer = SummaryWriter(args.tensorboard_dir)\n\n # ==============================\n # Initialize Booster\n # ==============================\n if args.plugin == \"gemini\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision,\n initial_scale=2**16,\n max_norm=args.grad_clip,\n enable_gradient_accumulation=(args.accumulation_steps > 1),\n )\n elif args.plugin == \"gemini_auto\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision,\n placement_policy=\"auto\",\n initial_scale=2**16,\n max_norm=args.grad_clip,\n enable_gradient_accumulation=(args.accumulation_steps > 1),\n )\n elif args.plugin == \"zero2\":\n plugin = LowLevelZeroPlugin(\n stage=2,\n precision=args.mixed_precision,\n initial_scale=2**16,\n max_norm=args.grad_clip,\n )\n elif args.plugin == \"zero2_cpu\":\n plugin = LowLevelZeroPlugin(\n stage=2,\n precision=args.mixed_precision,\n initial_scale=2**16,\n cpu_offload=True,\n max_norm=args.grad_clip,\n )\n elif args.plugin == \"3d\":\n plugin = HybridParallelPlugin(\n tp_size=args.tp,\n pp_size=1,\n zero_stage=args.zero,\n max_norm=args.grad_clip,\n precision=args.mixed_precision,\n )\n else:\n raise ValueError(f\"Unknown plugin {args.plugin}\")\n\n booster = Booster(plugin=plugin)\n\n # ======================================================\n # Initialize Tokenizer, Dataset, Collator and Dataloader\n # ======================================================\n tokenizer = LlamaTokenizer.from_pretrained(args.pretrained)\n if args.pad_token == \"eos\":\n tokenizer.pad_token = tokenizer.eos_token\n elif args.pad_token == \"unk\":\n tokenizer.pad_token = tokenizer.unk_token\n tokenizer.add_bos_token = False\n tokenizer.add_eos_token = False\n\n coordinator.print_on_master(f\"Configuration file will be saved at: {args.config_file}\")\n coordinator.print_on_master(f\"Tensorboard logs will be saved at: {args.tensorboard_dir}\")\n 
coordinator.print_on_master(f\"Model checkpoint will be saved at: {args.save_dir}\")\n\n coordinator.print_on_master(f\"Load dataset: {args.dataset}\")\n\n dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode=\"train\")\n data_collator = DataCollatorForSupervisedDataset(\n tokenizer=tokenizer, max_length=args.max_length, padding=args.padding_mode\n )\n dataloader = plugin.prepare_dataloader(\n dataset=dataset,\n batch_size=args.micro_batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=data_collator,\n distributed_sampler_cls=StatefulDistributedSampler,\n )\n coordinator.print_on_master(\n f\"Max device memory after data loader: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n\n # ======================================================\n # Initialize Model, Objective, Optimizer and LR Scheduler\n # ======================================================\n init_ctx = (\n LazyInitContext(default_device=get_current_device())\n if isinstance(plugin, (GeminiPlugin, HybridParallelPlugin))\n else nullcontext()\n )\n with init_ctx:\n model = LlamaForCausalLM.from_pretrained(args.pretrained)\n # Freeze part of parameters.\n if args.freeze_non_embeds_params:\n freeze_non_embeds_parameters(model=model)\n # this is essential, otherwise the grad checkpoint will not work.\n model.train()\n\n if args.use_grad_checkpoint:\n model.gradient_checkpointing_enable()\n coordinator.print_on_master(msg=\"Gradient checkpointing enabled successfully\")\n if args.use_flash_attn:\n replace_with_flash_attention(model=model)\n coordinator.print_on_master(msg=\"Flash-attention enabled successfully\")\n\n model_numel = get_model_numel(model)\n coordinator.print_on_master(f\"Model params: {format_numel_str(model_numel)}\")\n\n optimizer = HybridAdam(\n model_params=filter(lambda p: p.requires_grad, model.parameters())\n if args.freeze_non_embeds_params\n else model.parameters(),\n lr=args.lr,\n betas=(0.9, 0.95),\n weight_decay=args.weight_decay,\n adamw_mode=True,\n )\n\n if args.warmup_steps is None:\n args.warmup_steps = int(args.num_epochs * 0.025 * (len(dataloader) // args.accumulation_steps))\n coordinator.print_on_master(f\"Warmup steps is set to {args.warmup_steps}\")\n\n lr_scheduler = CosineAnnealingWarmupLR(\n optimizer=optimizer,\n total_steps=args.num_epochs * (len(dataloader) // args.accumulation_steps),\n warmup_steps=args.warmup_steps,\n eta_min=0.1 * args.lr,\n )\n\n # Flash attention will be disabled because it does NOT support fp32.\n default_dtype = torch.float16 if args.mixed_precision == \"fp16\" else torch.bfloat16\n torch.set_default_dtype(default_dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n dataloader=dataloader,\n )\n\n torch.set_default_dtype(torch.float)\n\n coordinator.print_on_master(\n f\"Booster init max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB\"\n )\n\n start_epoch = 0\n start_step = 0\n sampler_start_idx = 0\n if args.load_checkpoint is not None:\n if \"modeling\" in args.load_checkpoint:\n coordinator.print_on_master(f\"Continued pretrain from checkpoint {args.load_checkpoint}\")\n booster.load_model(model, args.load_checkpoint)\n else:\n coordinator.print_on_master(f\"Load model checkpoint from {args.load_checkpoint}\")\n start_epoch, start_step, sampler_start_idx = load_checkpoint(\n 
load_dir=args.load_checkpoint,\n booster=booster,\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n )\n coordinator.print_on_master(\n f\"Loaded checkpoint {args.load_checkpoint} at epoch {start_epoch} step {start_step}\"\n )\n coordinator.print_on_master(f\"Loaded sample at index {sampler_start_idx}\")\n\n coordinator.print_on_master(\n f\"Checkpoint loaded max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Checkpoint loaded device memory: {accelerator.memory_allocated() / 1024 ** 2:.2f} MB\"\n )\n coordinator.print_on_master(\n f\"Checkpoint loaded max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB\"\n )\n\n if args.use_neft:\n coordinator.print_on_master(\"Activate NEFTune.\")\n model, handle = activate_neftune(model)\n\n num_steps_per_epoch = len(dataloader) // args.accumulation_steps\n # If resume training, set the sampler start index to the correct value\n assert isinstance(dataloader.sampler, StatefulDistributedSampler)\n dataloader.sampler.set_start_index(start_index=sampler_start_idx)\n\n for epoch in range(start_epoch, args.num_epochs):\n dataloader.sampler.set_epoch(epoch=epoch)\n pbar = tqdm(\n desc=f\"Epoch {epoch}\",\n disable=not coordinator.is_master(),\n total=num_steps_per_epoch,\n initial=start_step // args.accumulation_steps,\n )\n total_loss = torch.tensor(0.0, device=get_current_device())\n for step, batch in enumerate(dataloader, start=start_step):\n batch = {k: v.to(get_current_device()) for k, v in batch.items() if isinstance(v, torch.Tensor)}\n\n batch_output = model(**batch)\n\n loss = batch_output.loss / args.accumulation_steps\n total_loss.add_(loss.data)\n\n booster.backward(loss=loss, optimizer=optimizer)\n\n if (step + 1) % args.accumulation_steps == 0:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n all_reduce_mean(tensor=total_loss)\n pbar.set_postfix({\"Loss\": f\"{total_loss.item():.4f}\"})\n if coordinator.is_master():\n global_step = (epoch * num_steps_per_epoch) + (step + 1) // args.accumulation_steps\n writer.add_scalar(tag=\"Loss\", scalar_value=total_loss.item(), global_step=global_step)\n writer.add_scalar(\n tag=\"Learning Rate\",\n scalar_value=lr_scheduler.get_last_lr()[0],\n global_step=global_step,\n )\n total_loss.fill_(0.0)\n pbar.update()\n # Save modeling.\n\n if (args.save_interval > 0 and (step + 1) % (args.save_interval * args.accumulation_steps) == 0) or (\n step + 1\n ) == len(dataloader):\n coordinator.print_on_master(\"\\nStart saving model checkpoint with running states\")\n\n if args.use_neft:\n coordinator.print_on_master(\"Deactivate NEFTune before saving model.\")\n deactivate_neftune(model, handle)\n\n accelerator.empty_cache()\n save_checkpoint(\n save_dir=args.save_dir,\n booster=booster,\n model=model,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n epoch=epoch,\n step=step + 1,\n batch_size=args.micro_batch_size,\n coordinator=coordinator,\n )\n coordinator.print_on_master(\n f\"Saved checkpoint at epoch {epoch} step {step + 1} at folder {args.save_dir}\"\n )\n\n if args.use_neft:\n coordinator.print_on_master(\"Activate NEFTune.\")\n model, handle = activate_neftune(model)\n\n # Delete cache.\n # del batch, batch_labels, batch_output, loss\n accelerator.empty_cache()\n\n # the continue epochs are not resumed, so we need to reset the sampler start index and start step\n dataloader.sampler.set_start_index(start_index=0)\n start_step = 0\n\n if args.use_neft:\n 
coordinator.print_on_master(\"Deactivate NEFTune.\")\n deactivate_neftune(model, handle)\n\n # Final save.\n coordinator.print_on_master(\"Start saving final model checkpoint\")\n booster.save_model(model, os.path.join(args.save_dir, \"modeling\"), shard=True)\n coordinator.print_on_master(f\"Saved final model checkpoint at epoch {epoch} at folder {args.save_dir}\")\n\n coordinator.print_on_master(f\"Max device memory usage: {accelerator.max_memory_allocated()/1024**2:.2f} MB\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/train.py" } ]
diff --git a/applications/Colossal-LLaMA-2/train.py b/applications/Colossal-LLaMA-2/train.py index 2e4bab75a085..d97da61e4dc8 100644 --- a/applications/Colossal-LLaMA-2/train.py +++ b/applications/Colossal-LLaMA-2/train.py @@ -56,6 +56,7 @@ def format_numel_str(numel: int) -> str: def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor: dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM) + tensor = tensor.data tensor.div_(dist.get_world_size()) return tensor
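The diff above only inserts `tensor = tensor.data` before the in-place division in `all_reduce_mean`. A plausible reading (my assumption, the issue text gives no rationale) is that it keeps `div_` off the autograd graph, since an in-place op on a leaf tensor that still requires grad raises a `RuntimeError`:

```python
# Standalone sketch of the behaviour the added line sidesteps (assumption about intent;
# not taken from the repository). In-place ops on a leaf tensor that requires grad fail,
# while re-binding to `.data` first mutates the raw storage without autograd involvement.
import torch

t = torch.tensor(6.0, requires_grad=True)
try:
    t.div_(3)                      # RuntimeError: a leaf Variable that requires grad ...
except RuntimeError as err:
    print("in-place on leaf failed:", err)

t = t.data                         # same move as `tensor = tensor.data` in the diff
t.div_(3)                          # plain in-place division, no graph bookkeeping
print(t.item())                    # 2.0
```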
Mailu__Mailu-2513
Include start and end dates in the auto-reply period

## Environment & Version

### Environment

- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm

### Version

- Version: `1.9`

## Description

The administration interface provides an [auto-reply](https://mailu.io/master/webadministration.html#auto-reply) page where automatic replies can be configured with start and end dates. Unfortunately, neither the start date nor the end date is included in the auto-reply period (i.e. no auto replies are sent on these two days). To work around this you have to enter the day before your vacation as the start date and the day after your vacation as the end date. This is not intuitive.

## Replication Steps

Activate the auto-reply feature, insert subject and body text, and enter the current date as "start of vacation" ("end of vacation" has to be a date in the future). Then send an email from another (external) email account to your email address hosted on Mailu.

## Observed behaviour

No auto-reply message is received by the sender.

## Expected behaviour

An auto-reply message is received by the sender. To verify this behaviour, add yesterday's date as the "start of vacation" date and send another email to your Mailu account ... the sender will receive an auto-reply message ... The same applies to the "end of vacation" date.

## Logs

n/a
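The `User.reply_active` property in the model file below compares the dates strictly (`reply_startdate < now and reply_enddate > now`), which matches the reported behaviour. Here is a minimal sketch of the inclusive check the reporter asks for, written as a free function for illustration (the real property lives on the SQLAlchemy model and is not patched here):

```python
# Minimal sketch of an inclusive auto-reply window (illustrative free function, not the
# actual Mailu patch). Using <= / >= makes both boundary dates part of the period.
from datetime import date

def reply_active(reply_enabled, reply_startdate, reply_enddate, today=None):
    now = today or date.today()
    return reply_enabled and reply_startdate <= now <= reply_enddate

assert reply_active(True, date(2022, 7, 1), date(2022, 7, 10), today=date(2022, 7, 1))   # start day counts
assert reply_active(True, date(2022, 7, 1), date(2022, 7, 10), today=date(2022, 7, 10))  # end day counts
assert not reply_active(True, date(2022, 7, 1), date(2022, 7, 10), today=date(2022, 7, 11))
```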
[ { "content": "\"\"\" Mailu config storage model\n\"\"\"\n\nimport os\nimport smtplib\nimport json\n\nfrom datetime import date\nfrom email.mime import text\nfrom itertools import chain\n\nimport flask_sqlalchemy\nimport sqlalchemy\nimport passlib.context\nimport passlib.hash\nimport passlib.registry\nimport time\nimport os\nimport smtplib\nimport idna\nimport dns.resolver\nimport dns.exception\n\nfrom flask import current_app as app\nfrom sqlalchemy.ext import declarative\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.orm.attributes import flag_modified\nfrom werkzeug.utils import cached_property\n\nfrom mailu import dkim, utils\n\n\ndb = flask_sqlalchemy.SQLAlchemy()\n\n\nclass IdnaDomain(db.TypeDecorator):\n \"\"\" Stores a Unicode string in it's IDNA representation (ASCII only)\n \"\"\"\n\n impl = db.String(80)\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode unicode domain name to punycode \"\"\"\n return idna.encode(value.lower()).decode('ascii')\n\n def process_result_value(self, value, dialect):\n \"\"\" decode punycode domain name to unicode \"\"\"\n return idna.decode(value)\n\nclass IdnaEmail(db.TypeDecorator):\n \"\"\" Stores a Unicode string in it's IDNA representation (ASCII only)\n \"\"\"\n\n impl = db.String(255)\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode unicode domain part of email address to punycode \"\"\"\n if not '@' in value:\n raise ValueError('invalid email address (no \"@\")')\n localpart, domain_name = value.lower().rsplit('@', 1)\n if '@' in localpart:\n raise ValueError('email local part must not contain \"@\"')\n return f'{localpart}@{idna.encode(domain_name).decode(\"ascii\")}'\n\n def process_result_value(self, value, dialect):\n \"\"\" decode punycode domain part of email to unicode \"\"\"\n localpart, domain_name = value.rsplit('@', 1)\n return f'{localpart}@{idna.decode(domain_name)}'\n\nclass CommaSeparatedList(db.TypeDecorator):\n \"\"\" Stores a list as a comma-separated string, compatible with Postfix.\n \"\"\"\n\n impl = db.String\n cache_ok = True\n python_type = list\n\n def process_bind_param(self, value, dialect):\n \"\"\" join list of items to comma separated string \"\"\"\n if not isinstance(value, (list, tuple, set)):\n raise TypeError('Must be a list of strings')\n for item in value:\n if ',' in item:\n raise ValueError('list item must not contain \",\"')\n return ','.join(sorted(set(value)))\n\n def process_result_value(self, value, dialect):\n \"\"\" split comma separated string to list \"\"\"\n return list(filter(bool, (item.strip() for item in value.split(',')))) if value else []\n\nclass JSONEncoded(db.TypeDecorator):\n \"\"\" Represents an immutable structure as a json-encoded string.\n \"\"\"\n\n impl = db.String\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode data as json \"\"\"\n return json.dumps(value) if value else None\n\n def process_result_value(self, value, dialect):\n \"\"\" decode json to data \"\"\"\n return json.loads(value) if value else None\n\nclass Base(db.Model):\n \"\"\" Base class for all models\n \"\"\"\n\n __abstract__ = True\n\n metadata = sqlalchemy.schema.MetaData(\n naming_convention={\n 'fk': '%(table_name)s_%(column_0_name)s_fkey',\n 'pk': '%(table_name)s_pkey'\n }\n )\n\n created_at = db.Column(db.Date, nullable=False, default=date.today)\n updated_at = db.Column(db.Date, 
nullable=True, onupdate=date.today)\n comment = db.Column(db.String(255), nullable=True, default='')\n\n def __str__(self):\n pkey = self.__table__.primary_key.columns.values()[0].name\n if pkey == 'email':\n # ugly hack for email declared attr. _email is not always up2date\n return str(f'{self.localpart}@{self.domain_name}')\n return str(getattr(self, pkey))\n\n def __repr__(self):\n return f'<{self.__class__.__name__} {str(self)!r}>'\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n pkey = self.__table__.primary_key.columns.values()[0].name\n this = getattr(self, pkey, None)\n other = getattr(other, pkey, None)\n return this is not None and other is not None and str(this) == str(other)\n else:\n return NotImplemented\n\n # we need hashable instances here for sqlalchemy to update collections\n # in collections.bulk_replace, but auto-incrementing don't always have\n # a valid primary key, in this case we use the object's id\n __hashed = None\n def __hash__(self):\n if self.__hashed is None:\n primary = getattr(self, self.__table__.primary_key.columns.values()[0].name)\n self.__hashed = id(self) if primary is None else hash(primary)\n return self.__hashed\n\n def dont_change_updated_at(self):\n \"\"\" Mark updated_at as modified, but keep the old date when updating the model\"\"\"\n flag_modified(self, 'updated_at')\n\n\n# Many-to-many association table for domain managers\nmanagers = db.Table('manager', Base.metadata,\n db.Column('domain_name', IdnaDomain, db.ForeignKey('domain.name')),\n db.Column('user_email', IdnaEmail, db.ForeignKey('user.email'))\n)\n\n\nclass Config(Base):\n \"\"\" In-database configuration values\n \"\"\"\n\n name = db.Column(db.String(255), primary_key=True, nullable=False)\n value = db.Column(JSONEncoded)\n\n\ndef _save_dkim_keys(session):\n \"\"\" store DKIM keys after commit \"\"\"\n for obj in session.identity_map.values():\n if isinstance(obj, Domain):\n obj.save_dkim_key()\n\nclass Domain(Base):\n \"\"\" A DNS domain that has mail addresses associated to it.\n \"\"\"\n\n __tablename__ = 'domain'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n managers = db.relationship('User', secondary=managers,\n backref=db.backref('manager_of'), lazy='dynamic')\n max_users = db.Column(db.Integer, nullable=False, default=-1)\n max_aliases = db.Column(db.Integer, nullable=False, default=-1)\n max_quota_bytes = db.Column(db.BigInteger, nullable=False, default=0)\n signup_enabled = db.Column(db.Boolean, nullable=False, default=False)\n\n _dkim_key = None\n _dkim_key_on_disk = None\n\n def _dkim_file(self):\n \"\"\" return filename for active DKIM key \"\"\"\n return app.config['DKIM_PATH'].format(\n domain=self.name,\n selector=app.config['DKIM_SELECTOR']\n )\n\n def save_dkim_key(self):\n \"\"\" save changed DKIM key to disk \"\"\"\n if self._dkim_key != self._dkim_key_on_disk:\n file_path = self._dkim_file()\n if self._dkim_key:\n with open(file_path, 'wb') as handle:\n handle.write(self._dkim_key)\n elif os.path.exists(file_path):\n os.unlink(file_path)\n self._dkim_key_on_disk = self._dkim_key\n\n @cached_property\n def dns_mx(self):\n \"\"\" return MX record for domain \"\"\"\n hostname = app.config['HOSTNAME']\n return f'{self.name}. 600 IN MX 10 {hostname}.'\n\n @cached_property\n def dns_spf(self):\n \"\"\" return SPF record for domain \"\"\"\n hostname = app.config['HOSTNAME']\n return f'{self.name}. 
600 IN TXT \"v=spf1 mx a:{hostname} ~all\"'\n\n @property\n def dns_dkim(self):\n \"\"\" return DKIM record for domain \"\"\"\n if self.dkim_key:\n selector = app.config['DKIM_SELECTOR']\n txt = f'v=DKIM1; k=rsa; p={self.dkim_publickey}'\n record = ' '.join(f'\"{txt[p:p+250]}\"' for p in range(0, len(txt), 250))\n return f'{selector}._domainkey.{self.name}. 600 IN TXT {record}'\n\n @cached_property\n def dns_dmarc(self):\n \"\"\" return DMARC record for domain \"\"\"\n if self.dkim_key:\n domain = app.config['DOMAIN']\n rua = app.config['DMARC_RUA']\n rua = f' rua=mailto:{rua}@{domain};' if rua else ''\n ruf = app.config['DMARC_RUF']\n ruf = f' ruf=mailto:{ruf}@{domain};' if ruf else ''\n return f'_dmarc.{self.name}. 600 IN TXT \"v=DMARC1; p=reject;{rua}{ruf} adkim=s; aspf=s\"'\n\n @cached_property\n def dns_dmarc_report(self):\n \"\"\" return DMARC report record for mailu server \"\"\"\n if self.dkim_key:\n domain = app.config['DOMAIN']\n return f'{self.name}._report._dmarc.{domain}. 600 IN TXT \"v=DMARC1\"'\n\n @cached_property\n def dns_autoconfig(self):\n \"\"\" return list of auto configuration records (RFC6186) \"\"\"\n hostname = app.config['HOSTNAME']\n protocols = [\n ('imap', 143, 20),\n ('pop3', 110, 20),\n ('submission', 587, 20),\n ]\n if app.config['TLS_FLAVOR'] != 'notls':\n protocols.extend([\n ('autodiscover', 443, 10),\n ('submissions', 465, 10),\n ('imaps', 993, 10),\n ('pop3s', 995, 10),\n ])\n\n return [\n f'_{proto}._tcp.{self.name}. 600 IN SRV {prio} 1 {port} {hostname}.'\n for proto, port, prio\n in protocols\n ]+[f'autoconfig.{self.name}. 600 IN CNAME {hostname}.']\n\n @cached_property\n def dns_tlsa(self):\n \"\"\" return TLSA record for domain when using letsencrypt \"\"\"\n hostname = app.config['HOSTNAME']\n if app.config['TLS_FLAVOR'] in ('letsencrypt', 'mail-letsencrypt'):\n # current ISRG Root X1 (RSA 4096, O = Internet Security Research Group, CN = ISRG Root X1) @20210902\n return f'_25._tcp.{hostname}. 
86400 IN TLSA 2 1 1 0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3'\n\n @property\n def dkim_key(self):\n \"\"\" return private DKIM key \"\"\"\n if self._dkim_key is None:\n file_path = self._dkim_file()\n if os.path.exists(file_path):\n with open(file_path, 'rb') as handle:\n self._dkim_key = self._dkim_key_on_disk = handle.read()\n else:\n self._dkim_key = self._dkim_key_on_disk = b''\n return self._dkim_key if self._dkim_key else None\n\n @dkim_key.setter\n def dkim_key(self, value):\n \"\"\" set private DKIM key \"\"\"\n old_key = self.dkim_key\n self._dkim_key = value if value is not None else b''\n if self._dkim_key != old_key:\n if not sqlalchemy.event.contains(db.session, 'after_commit', _save_dkim_keys):\n sqlalchemy.event.listen(db.session, 'after_commit', _save_dkim_keys)\n\n @property\n def dkim_publickey(self):\n \"\"\" return public part of DKIM key \"\"\"\n dkim_key = self.dkim_key\n if dkim_key:\n return dkim.strip_key(dkim_key).decode('utf8')\n\n def generate_dkim_key(self):\n \"\"\" generate new DKIM key \"\"\"\n self.dkim_key = dkim.gen_key()\n\n def has_email(self, localpart):\n \"\"\" checks if localpart is configured for domain \"\"\"\n localpart = localpart.lower()\n for email in chain(self.users, self.aliases):\n if email.localpart == localpart:\n return True\n return False\n\n def check_mx(self):\n \"\"\" checks if MX record for domain points to mailu host \"\"\"\n try:\n hostnames = set(app.config['HOSTNAMES'].split(','))\n return any(\n rset.exchange.to_text().rstrip('.') in hostnames\n for rset in dns.resolver.resolve(self.name, 'MX')\n )\n except dns.exception.DNSException:\n return False\n\n\nclass Alternative(Base):\n \"\"\" Alternative name for a served domain.\n The name \"domain alias\" was avoided to prevent some confusion.\n \"\"\"\n\n __tablename__ = 'alternative'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n domain_name = db.Column(IdnaDomain, db.ForeignKey(Domain.name))\n domain = db.relationship(Domain,\n backref=db.backref('alternatives', cascade='all, delete-orphan'))\n\n\nclass Relay(Base):\n \"\"\" Relayed mail domain.\n The domain is either relayed publicly or through a specified SMTP host.\n \"\"\"\n\n __tablename__ = 'relay'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n smtp = db.Column(db.String(80), nullable=True)\n\n\nclass Email(object):\n \"\"\" Abstraction for an email address (localpart and domain).\n \"\"\"\n\n localpart = db.Column(db.String(80), nullable=False)\n\n @declarative.declared_attr\n def domain_name(cls):\n \"\"\" the domain part of the email address \"\"\"\n return db.Column(IdnaDomain, db.ForeignKey(Domain.name),\n nullable=False, default=IdnaDomain)\n\n # This field is redundant with both localpart and domain name.\n # It is however very useful for quick lookups without joining tables,\n # especially when the mail server is reading the database.\n @declarative.declared_attr\n def _email(cls):\n \"\"\" the complete email address (localpart@domain) \"\"\"\n\n def updater(ctx):\n key = f'{cls.__tablename__}_email'\n if key in ctx.current_parameters:\n return ctx.current_parameters[key]\n return '{localpart}@{domain_name}'.format_map(ctx.current_parameters)\n\n return db.Column('email', IdnaEmail, primary_key=True, nullable=False, onupdate=updater)\n\n # We need to keep email, localpart and domain_name in sync.\n # But IMHO using email as primary key was not a good idea in the first place.\n @hybrid_property\n def email(self):\n \"\"\" getter for email - gets 
_email \"\"\"\n return self._email\n\n @email.setter\n def email(self, value):\n \"\"\" setter for email - sets _email, localpart and domain_name at once \"\"\"\n self._email = value.lower()\n self.localpart, self.domain_name = self._email.rsplit('@', 1)\n\n @staticmethod\n def _update_localpart(target, value, *_):\n if target.domain_name:\n target._email = f'{value}@{target.domain_name}'\n\n @staticmethod\n def _update_domain_name(target, value, *_):\n if target.localpart:\n target._email = f'{target.localpart}@{value}'\n\n @classmethod\n def __declare_last__(cls):\n # gets called after mappings are completed\n sqlalchemy.event.listen(cls.localpart, 'set', cls._update_localpart, propagate=True)\n sqlalchemy.event.listen(cls.domain_name, 'set', cls._update_domain_name, propagate=True)\n\n def sendmail(self, subject, body):\n \"\"\" send an email to the address \"\"\"\n f_addr = f'{app.config[\"POSTMASTER\"]}@{idna.encode(app.config[\"DOMAIN\"]).decode(\"ascii\")}'\n with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:\n to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode(\"ascii\")}'\n msg = text.MIMEText(body)\n msg['Subject'] = subject\n msg['From'] = f_addr\n msg['To'] = to_address\n smtp.sendmail(f_addr, [to_address], msg.as_string())\n\n @classmethod\n def resolve_domain(cls, email):\n \"\"\" resolves domain alternative to real domain \"\"\"\n localpart, domain_name = email.rsplit('@', 1) if '@' in email else (None, email)\n if alternative := Alternative.query.get(domain_name):\n domain_name = alternative.domain_name\n return (localpart, domain_name)\n\n @classmethod\n def resolve_destination(cls, localpart, domain_name, ignore_forward_keep=False):\n \"\"\" return destination for email address localpart@domain_name \"\"\"\n\n localpart_stripped = None\n stripped_alias = None\n\n if delims := os.environ.get('RECIPIENT_DELIMITER'):\n try:\n pos = next(i for i, c in enumerate(localpart) if c in delims)\n except StopIteration:\n pass\n else:\n localpart_stripped = localpart[:pos]\n\n # is localpart@domain_name or localpart_stripped@domain_name an user?\n user = User.query.get(f'{localpart}@{domain_name}')\n if not user and localpart_stripped:\n user = User.query.get(f'{localpart_stripped}@{domain_name}')\n\n if user:\n email = f'{localpart}@{domain_name}'\n\n if not user.forward_enabled:\n return [email]\n\n destination = user.forward_destination\n if user.forward_keep or ignore_forward_keep:\n destination.append(email)\n return destination\n\n # is localpart, domain_name or localpart_stripped@domain_name an alias?\n if pure_alias := Alias.resolve(localpart, domain_name):\n if not pure_alias.wildcard:\n return pure_alias.destination\n\n if stripped_alias := Alias.resolve(localpart_stripped, domain_name):\n return stripped_alias.destination\n\n if pure_alias:\n return pure_alias.destination\n\n return None\n\n\nclass User(Base, Email):\n \"\"\" A user is an email address that has a password to access a mailbox.\n \"\"\"\n\n __tablename__ = 'user'\n _ctx = None\n _credential_cache = {}\n\n domain = db.relationship(Domain,\n backref=db.backref('users', cascade='all, delete-orphan'))\n password = db.Column(db.String(255), nullable=False)\n quota_bytes = db.Column(db.BigInteger, nullable=False, default=10**9)\n quota_bytes_used = db.Column(db.BigInteger, nullable=False, default=0)\n global_admin = db.Column(db.Boolean, nullable=False, default=False)\n enabled = db.Column(db.Boolean, nullable=False, default=True)\n\n # Features\n enable_imap = 
db.Column(db.Boolean, nullable=False, default=True)\n enable_pop = db.Column(db.Boolean, nullable=False, default=True)\n\n # Filters\n forward_enabled = db.Column(db.Boolean, nullable=False, default=False)\n forward_destination = db.Column(CommaSeparatedList, nullable=True, default=list)\n forward_keep = db.Column(db.Boolean, nullable=False, default=True)\n reply_enabled = db.Column(db.Boolean, nullable=False, default=False)\n reply_subject = db.Column(db.String(255), nullable=True, default=None)\n reply_body = db.Column(db.Text, nullable=True, default=None)\n reply_startdate = db.Column(db.Date, nullable=False,\n default=date(1900, 1, 1))\n reply_enddate = db.Column(db.Date, nullable=False,\n default=date(2999, 12, 31))\n\n # Settings\n displayed_name = db.Column(db.String(160), nullable=False, default='')\n spam_enabled = db.Column(db.Boolean, nullable=False, default=True)\n spam_mark_as_read = db.Column(db.Boolean, nullable=False, default=True)\n spam_threshold = db.Column(db.Integer, nullable=False, default=lambda:int(app.config.get(\"DEFAULT_SPAM_THRESHOLD\", 80)))\n\n # Flask-login attributes\n is_authenticated = True\n is_active = True\n is_anonymous = False\n\n def get_id(self):\n \"\"\" return users email address \"\"\"\n return self.email\n\n @property\n def destination(self):\n \"\"\" returns comma separated string of destinations \"\"\"\n if self.forward_enabled:\n result = list(self.forward_destination)\n if self.forward_keep:\n result.append(self.email)\n return ','.join(result)\n else:\n return self.email\n\n @property\n def reply_active(self):\n \"\"\" returns status of autoreply function \"\"\"\n now = date.today()\n return (\n self.reply_enabled and\n self.reply_startdate < now and\n self.reply_enddate > now\n )\n\n @property\n def sender_limiter(self):\n return utils.limiter.get_limiter(\n app.config[\"MESSAGE_RATELIMIT\"], \"sender\", self.email\n )\n\n @classmethod\n def get_password_context(cls):\n \"\"\" create password context for hashing and verification\n \"\"\"\n if cls._ctx:\n return cls._ctx\n\n # compile schemes\n # - skip scrypt (throws a warning if the native wheels aren't found)\n # - skip plaintext schemes (will be misidentified)\n schemes = [\n scheme for scheme in passlib.registry.list_crypt_handlers()\n if not (scheme == 'scrypt' or scheme.endswith('plaintext'))\n ]\n cls._ctx = passlib.context.CryptContext(\n schemes=schemes,\n default='bcrypt_sha256',\n bcrypt_sha256__rounds=app.config['CREDENTIAL_ROUNDS'],\n deprecated='auto'\n )\n return cls._ctx\n\n def check_password(self, password):\n \"\"\" verifies password against stored hash\n and updates hash if outdated\n \"\"\"\n if password == '':\n return False\n cache_result = self._credential_cache.get(self.get_id())\n current_salt = self.password.split('$')[3] if len(self.password.split('$')) == 5 else None\n if cache_result and current_salt:\n cache_salt, cache_hash = cache_result\n if cache_salt == current_salt:\n return passlib.hash.pbkdf2_sha256.verify(password, cache_hash)\n else:\n # the cache is local per gunicorn; the password has changed\n # so the local cache can be invalidated\n del self._credential_cache[self.get_id()]\n reference = self.password\n # strip {scheme} if that's something mailu has added\n # passlib will identify *crypt based hashes just fine\n # on its own\n if reference.startswith(('{PBKDF2}', '{BLF-CRYPT}', '{SHA512-CRYPT}', '{SHA256-CRYPT}', '{MD5-CRYPT}', '{CRYPT}')):\n reference = reference.split('}', 1)[1]\n\n result, new_hash = 
User.get_password_context().verify_and_update(password, reference)\n if new_hash:\n self.password = new_hash\n db.session.add(self)\n db.session.commit()\n\n if result:\n \"\"\"The credential cache uses a low number of rounds to be fast.\nWhile it's not meant to be persisted to cold-storage, no additional measures\nare taken to ensure it isn't (mlock(), encrypted swap, ...) on the basis that\nwe have little control over GC and string interning anyways.\n\n An attacker that can dump the process' memory is likely to find credentials\nin clear-text regardless of the presence of the cache.\n \"\"\"\n self._credential_cache[self.get_id()] = (self.password.split('$')[3], passlib.hash.pbkdf2_sha256.using(rounds=1).hash(password))\n return result\n\n def set_password(self, password, raw=False):\n \"\"\" Set password for user\n @password: plain text password to encrypt (or, if raw is True: the hash itself)\n \"\"\"\n self.password = password if raw else User.get_password_context().hash(password)\n\n def get_managed_domains(self):\n \"\"\" return list of domains this user can manage \"\"\"\n if self.global_admin:\n return Domain.query.all()\n else:\n return self.manager_of\n\n def get_managed_emails(self, include_aliases=True):\n \"\"\" returns list of email addresses this user can manage \"\"\"\n emails = []\n for domain in self.get_managed_domains():\n emails.extend(domain.users)\n if include_aliases:\n emails.extend(domain.aliases)\n return emails\n\n def send_welcome(self):\n \"\"\" send welcome email to user \"\"\"\n if app.config['WELCOME']:\n self.sendmail(app.config['WELCOME_SUBJECT'], app.config['WELCOME_BODY'])\n\n @classmethod\n def get(cls, email):\n \"\"\" find user object for email address \"\"\"\n return cls.query.get(email)\n\n @classmethod\n def login(cls, email, password):\n \"\"\" login user when enabled and password is valid \"\"\"\n user = cls.query.get(email)\n return user if (user and user.enabled and user.check_password(password)) else None\n\n\nclass Alias(Base, Email):\n \"\"\" An alias is an email address that redirects to some destination.\n \"\"\"\n\n __tablename__ = 'alias'\n\n domain = db.relationship(Domain,\n backref=db.backref('aliases', cascade='all, delete-orphan'))\n wildcard = db.Column(db.Boolean, nullable=False, default=False)\n destination = db.Column(CommaSeparatedList, nullable=False, default=list)\n\n @classmethod\n def resolve(cls, localpart, domain_name):\n \"\"\" find aliases matching email address localpart@domain_name \"\"\"\n\n alias_preserve_case = cls.query.filter(\n sqlalchemy.and_(cls.domain_name == domain_name,\n sqlalchemy.or_(\n sqlalchemy.and_(\n cls.wildcard == False,\n cls.localpart == localpart\n ), sqlalchemy.and_(\n cls.wildcard == True,\n sqlalchemy.bindparam('l', localpart).like(cls.localpart)\n )\n )\n )\n ).order_by(cls.wildcard, sqlalchemy.func.char_length(cls.localpart).desc()).first()\n\n localpart_lower = localpart.lower() if localpart else None\n alias_lower_case = cls.query.filter(\n sqlalchemy.and_(cls.domain_name == domain_name,\n sqlalchemy.or_(\n sqlalchemy.and_(\n cls.wildcard == False,\n sqlalchemy.func.lower(cls.localpart) == localpart_lower\n ), sqlalchemy.and_(\n cls.wildcard == True,\n sqlalchemy.bindparam('l', localpart_lower).like(\n sqlalchemy.func.lower(cls.localpart))\n )\n )\n )\n ).order_by(cls.wildcard, sqlalchemy.func.char_length(\n sqlalchemy.func.lower(cls.localpart)).desc()).first()\n\n if alias_preserve_case and alias_lower_case:\n return alias_lower_case if alias_preserve_case.wildcard else 
alias_preserve_case\n\n if alias_preserve_case and not alias_lower_case:\n return alias_preserve_case\n\n if alias_lower_case and not alias_preserve_case:\n return alias_lower_case\n\n return None\n\n\nclass Token(Base):\n \"\"\" A token is an application password for a given user.\n \"\"\"\n\n __tablename__ = 'token'\n\n id = db.Column(db.Integer, primary_key=True)\n user_email = db.Column(db.String(255), db.ForeignKey(User.email),\n nullable=False)\n user = db.relationship(User,\n backref=db.backref('tokens', cascade='all, delete-orphan'))\n password = db.Column(db.String(255), nullable=False)\n ip = db.Column(db.String(255))\n\n def check_password(self, password):\n \"\"\" verifies password against stored hash\n and updates hash if outdated\n \"\"\"\n if self.password.startswith(\"$5$\"):\n if passlib.hash.sha256_crypt.verify(password, self.password):\n self.set_password(password)\n db.session.add(self)\n db.session.commit()\n return True\n return False\n return passlib.hash.pbkdf2_sha256.verify(password, self.password)\n\n def set_password(self, password):\n \"\"\" sets password using pbkdf2_sha256 (1 round) \"\"\"\n # tokens have 128bits of entropy, they are not bruteforceable\n self.password = passlib.hash.pbkdf2_sha256.using(rounds=1).hash(password)\n\n def __repr__(self):\n return f'<Token #{self.id}: {self.comment or self.ip or self.password}>'\n\n\nclass Fetch(Base):\n \"\"\" A fetched account is a remote POP/IMAP account fetched into a local\n account.\n \"\"\"\n\n __tablename__ = 'fetch'\n\n id = db.Column(db.Integer, primary_key=True)\n user_email = db.Column(db.String(255), db.ForeignKey(User.email),\n nullable=False)\n user = db.relationship(User,\n backref=db.backref('fetches', cascade='all, delete-orphan'))\n protocol = db.Column(db.Enum('imap', 'pop3'), nullable=False)\n host = db.Column(db.String(255), nullable=False)\n port = db.Column(db.Integer, nullable=False)\n tls = db.Column(db.Boolean, nullable=False, default=False)\n username = db.Column(db.String(255), nullable=False)\n password = db.Column(db.String(255), nullable=False)\n keep = db.Column(db.Boolean, nullable=False, default=False)\n last_check = db.Column(db.DateTime, nullable=True)\n error = db.Column(db.String(1023), nullable=True)\n\n def __repr__(self):\n return (\n f'<Fetch #{self.id}: {self.protocol}{\"s\" if self.tls else \"\"}:'\n f'//{self.username}@{self.host}:{self.port}>'\n )\n\n\nclass MailuConfig:\n \"\"\" Class which joins whole Mailu config for dumping\n and loading\n \"\"\"\n\n class MailuCollection:\n \"\"\" Provides dict- and list-like access to instances\n of a sqlalchemy model\n \"\"\"\n\n def __init__(self, model : db.Model):\n self.model = model\n\n def __repr__(self):\n return f'<{self.model.__name__}-Collection>'\n\n @cached_property\n def _items(self):\n return {\n inspect(item).identity: item\n for item in self.model.query.all()\n }\n\n def __len__(self):\n return len(self._items)\n\n def __iter__(self):\n return iter(self._items.values())\n\n def __getitem__(self, key):\n return self._items[key]\n\n def __setitem__(self, key, item):\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items[key] = item\n\n def __delitem__(self, key):\n del self._items[key]\n\n def append(self, item, update=False):\n \"\"\" list-like append \"\"\"\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if 
key in self._items:\n if not update:\n raise ValueError(f'item {key!r} already present in collection')\n self._items[key] = item\n\n def extend(self, items, update=False):\n \"\"\" list-like extend \"\"\"\n add = {}\n for item in items:\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if not update and key in self._items:\n raise ValueError(f'item {key!r} already present in collection')\n add[key] = item\n self._items.update(add)\n\n def pop(self, *args):\n \"\"\" list-like (no args) and dict-like (1 or 2 args) pop \"\"\"\n if args:\n if len(args) > 2:\n raise TypeError(f'pop expected at most 2 arguments, got {len(args)}')\n return self._items.pop(*args)\n else:\n return self._items.popitem()[1]\n\n def popitem(self):\n \"\"\" dict-like popitem \"\"\"\n return self._items.popitem()\n\n def remove(self, item):\n \"\"\" list-like remove \"\"\"\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if not key in self._items:\n raise ValueError(f'item {key!r} not found in collection')\n del self._items[key]\n\n def clear(self):\n \"\"\" dict-like clear \"\"\"\n while True:\n try:\n self.pop()\n except IndexError:\n break\n\n def update(self, items):\n \"\"\" dict-like update \"\"\"\n for key, item in items:\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items.update(items)\n\n def setdefault(self, key, item=None):\n \"\"\" dict-like setdefault \"\"\"\n if key in self._items:\n return self._items[key]\n if item is None:\n return None\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items[key] = item\n return item\n\n def __init__(self):\n\n # section-name -> attr\n self._sections = {\n name: getattr(self, name)\n for name in dir(self)\n if isinstance(getattr(self, name), self.MailuCollection)\n }\n\n # known models\n self._models = tuple(section.model for section in self._sections.values())\n\n # model -> attr\n self._sections.update({\n section.model: section for section in self._sections.values()\n })\n\n def _get_model(self, section):\n if section is None:\n return None\n model = self._sections.get(section)\n if model is None:\n raise ValueError(f'Invalid section: {section!r}')\n if isinstance(model, self.MailuCollection):\n return model.model\n return model\n\n def _add(self, items, section, update):\n\n model = self._get_model(section)\n if isinstance(items, self._models):\n items = [items]\n elif not hasattr(items, '__iter__'):\n raise ValueError(f'{items!r} is not iterable')\n\n for item in items:\n if model is not None and not isinstance(item, model):\n what = item.__class__.__name__.capitalize()\n raise ValueError(f'{what} can not be added to section {section!r}')\n self._sections[type(item)].append(item, update=update)\n\n def add(self, items, section=None):\n \"\"\" add item to config \"\"\"\n self._add(items, section, update=False)\n\n def update(self, items, section=None):\n \"\"\" add or replace item in config \"\"\"\n self._add(items, section, update=True)\n\n def remove(self, items, section=None):\n \"\"\" remove item from config \"\"\"\n model = self._get_model(section)\n if isinstance(items, self._models):\n items = [items]\n elif not hasattr(items, '__iter__'):\n raise 
ValueError(f'{items!r} is not iterable')\n\n for item in items:\n if isinstance(item, str):\n if section is None:\n raise ValueError(f'Cannot remove key {item!r} without section')\n del self._sections[model][item]\n elif model is not None and not isinstance(item, model):\n what = item.__class__.__name__.capitalize()\n raise ValueError(f'{what} can not be removed from section {section!r}')\n self._sections[type(item)].remove(item,)\n\n def clear(self, models=None):\n \"\"\" remove complete configuration \"\"\"\n for model in self._models:\n if models is None or model in models:\n db.session.query(model).delete()\n\n def check(self):\n \"\"\" check for duplicate domain names \"\"\"\n dup = set()\n for fqdn in chain(\n db.session.query(Domain.name),\n db.session.query(Alternative.name),\n db.session.query(Relay.name)\n ):\n if fqdn in dup:\n raise ValueError(f'Duplicate domain name: {fqdn}')\n dup.add(fqdn)\n\n domain = MailuCollection(Domain)\n user = MailuCollection(User)\n alias = MailuCollection(Alias)\n relay = MailuCollection(Relay)\n config = MailuCollection(Config)\n", "path": "core/admin/mailu/models.py" } ]
[ { "content": "\"\"\" Mailu config storage model\n\"\"\"\n\nimport os\nimport smtplib\nimport json\n\nfrom datetime import date\nfrom email.mime import text\nfrom itertools import chain\n\nimport flask_sqlalchemy\nimport sqlalchemy\nimport passlib.context\nimport passlib.hash\nimport passlib.registry\nimport time\nimport os\nimport smtplib\nimport idna\nimport dns.resolver\nimport dns.exception\n\nfrom flask import current_app as app\nfrom sqlalchemy.ext import declarative\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.orm.attributes import flag_modified\nfrom werkzeug.utils import cached_property\n\nfrom mailu import dkim, utils\n\n\ndb = flask_sqlalchemy.SQLAlchemy()\n\n\nclass IdnaDomain(db.TypeDecorator):\n \"\"\" Stores a Unicode string in it's IDNA representation (ASCII only)\n \"\"\"\n\n impl = db.String(80)\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode unicode domain name to punycode \"\"\"\n return idna.encode(value.lower()).decode('ascii')\n\n def process_result_value(self, value, dialect):\n \"\"\" decode punycode domain name to unicode \"\"\"\n return idna.decode(value)\n\nclass IdnaEmail(db.TypeDecorator):\n \"\"\" Stores a Unicode string in it's IDNA representation (ASCII only)\n \"\"\"\n\n impl = db.String(255)\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode unicode domain part of email address to punycode \"\"\"\n if not '@' in value:\n raise ValueError('invalid email address (no \"@\")')\n localpart, domain_name = value.lower().rsplit('@', 1)\n if '@' in localpart:\n raise ValueError('email local part must not contain \"@\"')\n return f'{localpart}@{idna.encode(domain_name).decode(\"ascii\")}'\n\n def process_result_value(self, value, dialect):\n \"\"\" decode punycode domain part of email to unicode \"\"\"\n localpart, domain_name = value.rsplit('@', 1)\n return f'{localpart}@{idna.decode(domain_name)}'\n\nclass CommaSeparatedList(db.TypeDecorator):\n \"\"\" Stores a list as a comma-separated string, compatible with Postfix.\n \"\"\"\n\n impl = db.String\n cache_ok = True\n python_type = list\n\n def process_bind_param(self, value, dialect):\n \"\"\" join list of items to comma separated string \"\"\"\n if not isinstance(value, (list, tuple, set)):\n raise TypeError('Must be a list of strings')\n for item in value:\n if ',' in item:\n raise ValueError('list item must not contain \",\"')\n return ','.join(sorted(set(value)))\n\n def process_result_value(self, value, dialect):\n \"\"\" split comma separated string to list \"\"\"\n return list(filter(bool, (item.strip() for item in value.split(',')))) if value else []\n\nclass JSONEncoded(db.TypeDecorator):\n \"\"\" Represents an immutable structure as a json-encoded string.\n \"\"\"\n\n impl = db.String\n cache_ok = True\n python_type = str\n\n def process_bind_param(self, value, dialect):\n \"\"\" encode data as json \"\"\"\n return json.dumps(value) if value else None\n\n def process_result_value(self, value, dialect):\n \"\"\" decode json to data \"\"\"\n return json.loads(value) if value else None\n\nclass Base(db.Model):\n \"\"\" Base class for all models\n \"\"\"\n\n __abstract__ = True\n\n metadata = sqlalchemy.schema.MetaData(\n naming_convention={\n 'fk': '%(table_name)s_%(column_0_name)s_fkey',\n 'pk': '%(table_name)s_pkey'\n }\n )\n\n created_at = db.Column(db.Date, nullable=False, default=date.today)\n updated_at = db.Column(db.Date, 
nullable=True, onupdate=date.today)\n comment = db.Column(db.String(255), nullable=True, default='')\n\n def __str__(self):\n pkey = self.__table__.primary_key.columns.values()[0].name\n if pkey == 'email':\n # ugly hack for email declared attr. _email is not always up2date\n return str(f'{self.localpart}@{self.domain_name}')\n return str(getattr(self, pkey))\n\n def __repr__(self):\n return f'<{self.__class__.__name__} {str(self)!r}>'\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n pkey = self.__table__.primary_key.columns.values()[0].name\n this = getattr(self, pkey, None)\n other = getattr(other, pkey, None)\n return this is not None and other is not None and str(this) == str(other)\n else:\n return NotImplemented\n\n # we need hashable instances here for sqlalchemy to update collections\n # in collections.bulk_replace, but auto-incrementing don't always have\n # a valid primary key, in this case we use the object's id\n __hashed = None\n def __hash__(self):\n if self.__hashed is None:\n primary = getattr(self, self.__table__.primary_key.columns.values()[0].name)\n self.__hashed = id(self) if primary is None else hash(primary)\n return self.__hashed\n\n def dont_change_updated_at(self):\n \"\"\" Mark updated_at as modified, but keep the old date when updating the model\"\"\"\n flag_modified(self, 'updated_at')\n\n\n# Many-to-many association table for domain managers\nmanagers = db.Table('manager', Base.metadata,\n db.Column('domain_name', IdnaDomain, db.ForeignKey('domain.name')),\n db.Column('user_email', IdnaEmail, db.ForeignKey('user.email'))\n)\n\n\nclass Config(Base):\n \"\"\" In-database configuration values\n \"\"\"\n\n name = db.Column(db.String(255), primary_key=True, nullable=False)\n value = db.Column(JSONEncoded)\n\n\ndef _save_dkim_keys(session):\n \"\"\" store DKIM keys after commit \"\"\"\n for obj in session.identity_map.values():\n if isinstance(obj, Domain):\n obj.save_dkim_key()\n\nclass Domain(Base):\n \"\"\" A DNS domain that has mail addresses associated to it.\n \"\"\"\n\n __tablename__ = 'domain'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n managers = db.relationship('User', secondary=managers,\n backref=db.backref('manager_of'), lazy='dynamic')\n max_users = db.Column(db.Integer, nullable=False, default=-1)\n max_aliases = db.Column(db.Integer, nullable=False, default=-1)\n max_quota_bytes = db.Column(db.BigInteger, nullable=False, default=0)\n signup_enabled = db.Column(db.Boolean, nullable=False, default=False)\n\n _dkim_key = None\n _dkim_key_on_disk = None\n\n def _dkim_file(self):\n \"\"\" return filename for active DKIM key \"\"\"\n return app.config['DKIM_PATH'].format(\n domain=self.name,\n selector=app.config['DKIM_SELECTOR']\n )\n\n def save_dkim_key(self):\n \"\"\" save changed DKIM key to disk \"\"\"\n if self._dkim_key != self._dkim_key_on_disk:\n file_path = self._dkim_file()\n if self._dkim_key:\n with open(file_path, 'wb') as handle:\n handle.write(self._dkim_key)\n elif os.path.exists(file_path):\n os.unlink(file_path)\n self._dkim_key_on_disk = self._dkim_key\n\n @cached_property\n def dns_mx(self):\n \"\"\" return MX record for domain \"\"\"\n hostname = app.config['HOSTNAME']\n return f'{self.name}. 600 IN MX 10 {hostname}.'\n\n @cached_property\n def dns_spf(self):\n \"\"\" return SPF record for domain \"\"\"\n hostname = app.config['HOSTNAME']\n return f'{self.name}. 
600 IN TXT \"v=spf1 mx a:{hostname} ~all\"'\n\n @property\n def dns_dkim(self):\n \"\"\" return DKIM record for domain \"\"\"\n if self.dkim_key:\n selector = app.config['DKIM_SELECTOR']\n txt = f'v=DKIM1; k=rsa; p={self.dkim_publickey}'\n record = ' '.join(f'\"{txt[p:p+250]}\"' for p in range(0, len(txt), 250))\n return f'{selector}._domainkey.{self.name}. 600 IN TXT {record}'\n\n @cached_property\n def dns_dmarc(self):\n \"\"\" return DMARC record for domain \"\"\"\n if self.dkim_key:\n domain = app.config['DOMAIN']\n rua = app.config['DMARC_RUA']\n rua = f' rua=mailto:{rua}@{domain};' if rua else ''\n ruf = app.config['DMARC_RUF']\n ruf = f' ruf=mailto:{ruf}@{domain};' if ruf else ''\n return f'_dmarc.{self.name}. 600 IN TXT \"v=DMARC1; p=reject;{rua}{ruf} adkim=s; aspf=s\"'\n\n @cached_property\n def dns_dmarc_report(self):\n \"\"\" return DMARC report record for mailu server \"\"\"\n if self.dkim_key:\n domain = app.config['DOMAIN']\n return f'{self.name}._report._dmarc.{domain}. 600 IN TXT \"v=DMARC1\"'\n\n @cached_property\n def dns_autoconfig(self):\n \"\"\" return list of auto configuration records (RFC6186) \"\"\"\n hostname = app.config['HOSTNAME']\n protocols = [\n ('imap', 143, 20),\n ('pop3', 110, 20),\n ('submission', 587, 20),\n ]\n if app.config['TLS_FLAVOR'] != 'notls':\n protocols.extend([\n ('autodiscover', 443, 10),\n ('submissions', 465, 10),\n ('imaps', 993, 10),\n ('pop3s', 995, 10),\n ])\n\n return [\n f'_{proto}._tcp.{self.name}. 600 IN SRV {prio} 1 {port} {hostname}.'\n for proto, port, prio\n in protocols\n ]+[f'autoconfig.{self.name}. 600 IN CNAME {hostname}.']\n\n @cached_property\n def dns_tlsa(self):\n \"\"\" return TLSA record for domain when using letsencrypt \"\"\"\n hostname = app.config['HOSTNAME']\n if app.config['TLS_FLAVOR'] in ('letsencrypt', 'mail-letsencrypt'):\n # current ISRG Root X1 (RSA 4096, O = Internet Security Research Group, CN = ISRG Root X1) @20210902\n return f'_25._tcp.{hostname}. 
86400 IN TLSA 2 1 1 0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3'\n\n @property\n def dkim_key(self):\n \"\"\" return private DKIM key \"\"\"\n if self._dkim_key is None:\n file_path = self._dkim_file()\n if os.path.exists(file_path):\n with open(file_path, 'rb') as handle:\n self._dkim_key = self._dkim_key_on_disk = handle.read()\n else:\n self._dkim_key = self._dkim_key_on_disk = b''\n return self._dkim_key if self._dkim_key else None\n\n @dkim_key.setter\n def dkim_key(self, value):\n \"\"\" set private DKIM key \"\"\"\n old_key = self.dkim_key\n self._dkim_key = value if value is not None else b''\n if self._dkim_key != old_key:\n if not sqlalchemy.event.contains(db.session, 'after_commit', _save_dkim_keys):\n sqlalchemy.event.listen(db.session, 'after_commit', _save_dkim_keys)\n\n @property\n def dkim_publickey(self):\n \"\"\" return public part of DKIM key \"\"\"\n dkim_key = self.dkim_key\n if dkim_key:\n return dkim.strip_key(dkim_key).decode('utf8')\n\n def generate_dkim_key(self):\n \"\"\" generate new DKIM key \"\"\"\n self.dkim_key = dkim.gen_key()\n\n def has_email(self, localpart):\n \"\"\" checks if localpart is configured for domain \"\"\"\n localpart = localpart.lower()\n for email in chain(self.users, self.aliases):\n if email.localpart == localpart:\n return True\n return False\n\n def check_mx(self):\n \"\"\" checks if MX record for domain points to mailu host \"\"\"\n try:\n hostnames = set(app.config['HOSTNAMES'].split(','))\n return any(\n rset.exchange.to_text().rstrip('.') in hostnames\n for rset in dns.resolver.resolve(self.name, 'MX')\n )\n except dns.exception.DNSException:\n return False\n\n\nclass Alternative(Base):\n \"\"\" Alternative name for a served domain.\n The name \"domain alias\" was avoided to prevent some confusion.\n \"\"\"\n\n __tablename__ = 'alternative'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n domain_name = db.Column(IdnaDomain, db.ForeignKey(Domain.name))\n domain = db.relationship(Domain,\n backref=db.backref('alternatives', cascade='all, delete-orphan'))\n\n\nclass Relay(Base):\n \"\"\" Relayed mail domain.\n The domain is either relayed publicly or through a specified SMTP host.\n \"\"\"\n\n __tablename__ = 'relay'\n\n name = db.Column(IdnaDomain, primary_key=True, nullable=False)\n smtp = db.Column(db.String(80), nullable=True)\n\n\nclass Email(object):\n \"\"\" Abstraction for an email address (localpart and domain).\n \"\"\"\n\n localpart = db.Column(db.String(80), nullable=False)\n\n @declarative.declared_attr\n def domain_name(cls):\n \"\"\" the domain part of the email address \"\"\"\n return db.Column(IdnaDomain, db.ForeignKey(Domain.name),\n nullable=False, default=IdnaDomain)\n\n # This field is redundant with both localpart and domain name.\n # It is however very useful for quick lookups without joining tables,\n # especially when the mail server is reading the database.\n @declarative.declared_attr\n def _email(cls):\n \"\"\" the complete email address (localpart@domain) \"\"\"\n\n def updater(ctx):\n key = f'{cls.__tablename__}_email'\n if key in ctx.current_parameters:\n return ctx.current_parameters[key]\n return '{localpart}@{domain_name}'.format_map(ctx.current_parameters)\n\n return db.Column('email', IdnaEmail, primary_key=True, nullable=False, onupdate=updater)\n\n # We need to keep email, localpart and domain_name in sync.\n # But IMHO using email as primary key was not a good idea in the first place.\n @hybrid_property\n def email(self):\n \"\"\" getter for email - gets 
_email \"\"\"\n return self._email\n\n @email.setter\n def email(self, value):\n \"\"\" setter for email - sets _email, localpart and domain_name at once \"\"\"\n self._email = value.lower()\n self.localpart, self.domain_name = self._email.rsplit('@', 1)\n\n @staticmethod\n def _update_localpart(target, value, *_):\n if target.domain_name:\n target._email = f'{value}@{target.domain_name}'\n\n @staticmethod\n def _update_domain_name(target, value, *_):\n if target.localpart:\n target._email = f'{target.localpart}@{value}'\n\n @classmethod\n def __declare_last__(cls):\n # gets called after mappings are completed\n sqlalchemy.event.listen(cls.localpart, 'set', cls._update_localpart, propagate=True)\n sqlalchemy.event.listen(cls.domain_name, 'set', cls._update_domain_name, propagate=True)\n\n def sendmail(self, subject, body):\n \"\"\" send an email to the address \"\"\"\n f_addr = f'{app.config[\"POSTMASTER\"]}@{idna.encode(app.config[\"DOMAIN\"]).decode(\"ascii\")}'\n with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:\n to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode(\"ascii\")}'\n msg = text.MIMEText(body)\n msg['Subject'] = subject\n msg['From'] = f_addr\n msg['To'] = to_address\n smtp.sendmail(f_addr, [to_address], msg.as_string())\n\n @classmethod\n def resolve_domain(cls, email):\n \"\"\" resolves domain alternative to real domain \"\"\"\n localpart, domain_name = email.rsplit('@', 1) if '@' in email else (None, email)\n if alternative := Alternative.query.get(domain_name):\n domain_name = alternative.domain_name\n return (localpart, domain_name)\n\n @classmethod\n def resolve_destination(cls, localpart, domain_name, ignore_forward_keep=False):\n \"\"\" return destination for email address localpart@domain_name \"\"\"\n\n localpart_stripped = None\n stripped_alias = None\n\n if delims := os.environ.get('RECIPIENT_DELIMITER'):\n try:\n pos = next(i for i, c in enumerate(localpart) if c in delims)\n except StopIteration:\n pass\n else:\n localpart_stripped = localpart[:pos]\n\n # is localpart@domain_name or localpart_stripped@domain_name an user?\n user = User.query.get(f'{localpart}@{domain_name}')\n if not user and localpart_stripped:\n user = User.query.get(f'{localpart_stripped}@{domain_name}')\n\n if user:\n email = f'{localpart}@{domain_name}'\n\n if not user.forward_enabled:\n return [email]\n\n destination = user.forward_destination\n if user.forward_keep or ignore_forward_keep:\n destination.append(email)\n return destination\n\n # is localpart, domain_name or localpart_stripped@domain_name an alias?\n if pure_alias := Alias.resolve(localpart, domain_name):\n if not pure_alias.wildcard:\n return pure_alias.destination\n\n if stripped_alias := Alias.resolve(localpart_stripped, domain_name):\n return stripped_alias.destination\n\n if pure_alias:\n return pure_alias.destination\n\n return None\n\n\nclass User(Base, Email):\n \"\"\" A user is an email address that has a password to access a mailbox.\n \"\"\"\n\n __tablename__ = 'user'\n _ctx = None\n _credential_cache = {}\n\n domain = db.relationship(Domain,\n backref=db.backref('users', cascade='all, delete-orphan'))\n password = db.Column(db.String(255), nullable=False)\n quota_bytes = db.Column(db.BigInteger, nullable=False, default=10**9)\n quota_bytes_used = db.Column(db.BigInteger, nullable=False, default=0)\n global_admin = db.Column(db.Boolean, nullable=False, default=False)\n enabled = db.Column(db.Boolean, nullable=False, default=True)\n\n # Features\n enable_imap = 
db.Column(db.Boolean, nullable=False, default=True)\n enable_pop = db.Column(db.Boolean, nullable=False, default=True)\n\n # Filters\n forward_enabled = db.Column(db.Boolean, nullable=False, default=False)\n forward_destination = db.Column(CommaSeparatedList, nullable=True, default=list)\n forward_keep = db.Column(db.Boolean, nullable=False, default=True)\n reply_enabled = db.Column(db.Boolean, nullable=False, default=False)\n reply_subject = db.Column(db.String(255), nullable=True, default=None)\n reply_body = db.Column(db.Text, nullable=True, default=None)\n reply_startdate = db.Column(db.Date, nullable=False,\n default=date(1900, 1, 1))\n reply_enddate = db.Column(db.Date, nullable=False,\n default=date(2999, 12, 31))\n\n # Settings\n displayed_name = db.Column(db.String(160), nullable=False, default='')\n spam_enabled = db.Column(db.Boolean, nullable=False, default=True)\n spam_mark_as_read = db.Column(db.Boolean, nullable=False, default=True)\n spam_threshold = db.Column(db.Integer, nullable=False, default=lambda:int(app.config.get(\"DEFAULT_SPAM_THRESHOLD\", 80)))\n\n # Flask-login attributes\n is_authenticated = True\n is_active = True\n is_anonymous = False\n\n def get_id(self):\n \"\"\" return users email address \"\"\"\n return self.email\n\n @property\n def destination(self):\n \"\"\" returns comma separated string of destinations \"\"\"\n if self.forward_enabled:\n result = list(self.forward_destination)\n if self.forward_keep:\n result.append(self.email)\n return ','.join(result)\n else:\n return self.email\n\n @property\n def reply_active(self):\n \"\"\" returns status of autoreply function \"\"\"\n now = date.today()\n return (\n self.reply_enabled and\n self.reply_startdate <= now and\n self.reply_enddate >= now\n )\n\n @property\n def sender_limiter(self):\n return utils.limiter.get_limiter(\n app.config[\"MESSAGE_RATELIMIT\"], \"sender\", self.email\n )\n\n @classmethod\n def get_password_context(cls):\n \"\"\" create password context for hashing and verification\n \"\"\"\n if cls._ctx:\n return cls._ctx\n\n # compile schemes\n # - skip scrypt (throws a warning if the native wheels aren't found)\n # - skip plaintext schemes (will be misidentified)\n schemes = [\n scheme for scheme in passlib.registry.list_crypt_handlers()\n if not (scheme == 'scrypt' or scheme.endswith('plaintext'))\n ]\n cls._ctx = passlib.context.CryptContext(\n schemes=schemes,\n default='bcrypt_sha256',\n bcrypt_sha256__rounds=app.config['CREDENTIAL_ROUNDS'],\n deprecated='auto'\n )\n return cls._ctx\n\n def check_password(self, password):\n \"\"\" verifies password against stored hash\n and updates hash if outdated\n \"\"\"\n if password == '':\n return False\n cache_result = self._credential_cache.get(self.get_id())\n current_salt = self.password.split('$')[3] if len(self.password.split('$')) == 5 else None\n if cache_result and current_salt:\n cache_salt, cache_hash = cache_result\n if cache_salt == current_salt:\n return passlib.hash.pbkdf2_sha256.verify(password, cache_hash)\n else:\n # the cache is local per gunicorn; the password has changed\n # so the local cache can be invalidated\n del self._credential_cache[self.get_id()]\n reference = self.password\n # strip {scheme} if that's something mailu has added\n # passlib will identify *crypt based hashes just fine\n # on its own\n if reference.startswith(('{PBKDF2}', '{BLF-CRYPT}', '{SHA512-CRYPT}', '{SHA256-CRYPT}', '{MD5-CRYPT}', '{CRYPT}')):\n reference = reference.split('}', 1)[1]\n\n result, new_hash = 
User.get_password_context().verify_and_update(password, reference)\n if new_hash:\n self.password = new_hash\n db.session.add(self)\n db.session.commit()\n\n if result:\n \"\"\"The credential cache uses a low number of rounds to be fast.\nWhile it's not meant to be persisted to cold-storage, no additional measures\nare taken to ensure it isn't (mlock(), encrypted swap, ...) on the basis that\nwe have little control over GC and string interning anyways.\n\n An attacker that can dump the process' memory is likely to find credentials\nin clear-text regardless of the presence of the cache.\n \"\"\"\n self._credential_cache[self.get_id()] = (self.password.split('$')[3], passlib.hash.pbkdf2_sha256.using(rounds=1).hash(password))\n return result\n\n def set_password(self, password, raw=False):\n \"\"\" Set password for user\n @password: plain text password to encrypt (or, if raw is True: the hash itself)\n \"\"\"\n self.password = password if raw else User.get_password_context().hash(password)\n\n def get_managed_domains(self):\n \"\"\" return list of domains this user can manage \"\"\"\n if self.global_admin:\n return Domain.query.all()\n else:\n return self.manager_of\n\n def get_managed_emails(self, include_aliases=True):\n \"\"\" returns list of email addresses this user can manage \"\"\"\n emails = []\n for domain in self.get_managed_domains():\n emails.extend(domain.users)\n if include_aliases:\n emails.extend(domain.aliases)\n return emails\n\n def send_welcome(self):\n \"\"\" send welcome email to user \"\"\"\n if app.config['WELCOME']:\n self.sendmail(app.config['WELCOME_SUBJECT'], app.config['WELCOME_BODY'])\n\n @classmethod\n def get(cls, email):\n \"\"\" find user object for email address \"\"\"\n return cls.query.get(email)\n\n @classmethod\n def login(cls, email, password):\n \"\"\" login user when enabled and password is valid \"\"\"\n user = cls.query.get(email)\n return user if (user and user.enabled and user.check_password(password)) else None\n\n\nclass Alias(Base, Email):\n \"\"\" An alias is an email address that redirects to some destination.\n \"\"\"\n\n __tablename__ = 'alias'\n\n domain = db.relationship(Domain,\n backref=db.backref('aliases', cascade='all, delete-orphan'))\n wildcard = db.Column(db.Boolean, nullable=False, default=False)\n destination = db.Column(CommaSeparatedList, nullable=False, default=list)\n\n @classmethod\n def resolve(cls, localpart, domain_name):\n \"\"\" find aliases matching email address localpart@domain_name \"\"\"\n\n alias_preserve_case = cls.query.filter(\n sqlalchemy.and_(cls.domain_name == domain_name,\n sqlalchemy.or_(\n sqlalchemy.and_(\n cls.wildcard == False,\n cls.localpart == localpart\n ), sqlalchemy.and_(\n cls.wildcard == True,\n sqlalchemy.bindparam('l', localpart).like(cls.localpart)\n )\n )\n )\n ).order_by(cls.wildcard, sqlalchemy.func.char_length(cls.localpart).desc()).first()\n\n localpart_lower = localpart.lower() if localpart else None\n alias_lower_case = cls.query.filter(\n sqlalchemy.and_(cls.domain_name == domain_name,\n sqlalchemy.or_(\n sqlalchemy.and_(\n cls.wildcard == False,\n sqlalchemy.func.lower(cls.localpart) == localpart_lower\n ), sqlalchemy.and_(\n cls.wildcard == True,\n sqlalchemy.bindparam('l', localpart_lower).like(\n sqlalchemy.func.lower(cls.localpart))\n )\n )\n )\n ).order_by(cls.wildcard, sqlalchemy.func.char_length(\n sqlalchemy.func.lower(cls.localpart)).desc()).first()\n\n if alias_preserve_case and alias_lower_case:\n return alias_lower_case if alias_preserve_case.wildcard else 
alias_preserve_case\n\n if alias_preserve_case and not alias_lower_case:\n return alias_preserve_case\n\n if alias_lower_case and not alias_preserve_case:\n return alias_lower_case\n\n return None\n\n\nclass Token(Base):\n \"\"\" A token is an application password for a given user.\n \"\"\"\n\n __tablename__ = 'token'\n\n id = db.Column(db.Integer, primary_key=True)\n user_email = db.Column(db.String(255), db.ForeignKey(User.email),\n nullable=False)\n user = db.relationship(User,\n backref=db.backref('tokens', cascade='all, delete-orphan'))\n password = db.Column(db.String(255), nullable=False)\n ip = db.Column(db.String(255))\n\n def check_password(self, password):\n \"\"\" verifies password against stored hash\n and updates hash if outdated\n \"\"\"\n if self.password.startswith(\"$5$\"):\n if passlib.hash.sha256_crypt.verify(password, self.password):\n self.set_password(password)\n db.session.add(self)\n db.session.commit()\n return True\n return False\n return passlib.hash.pbkdf2_sha256.verify(password, self.password)\n\n def set_password(self, password):\n \"\"\" sets password using pbkdf2_sha256 (1 round) \"\"\"\n # tokens have 128bits of entropy, they are not bruteforceable\n self.password = passlib.hash.pbkdf2_sha256.using(rounds=1).hash(password)\n\n def __repr__(self):\n return f'<Token #{self.id}: {self.comment or self.ip or self.password}>'\n\n\nclass Fetch(Base):\n \"\"\" A fetched account is a remote POP/IMAP account fetched into a local\n account.\n \"\"\"\n\n __tablename__ = 'fetch'\n\n id = db.Column(db.Integer, primary_key=True)\n user_email = db.Column(db.String(255), db.ForeignKey(User.email),\n nullable=False)\n user = db.relationship(User,\n backref=db.backref('fetches', cascade='all, delete-orphan'))\n protocol = db.Column(db.Enum('imap', 'pop3'), nullable=False)\n host = db.Column(db.String(255), nullable=False)\n port = db.Column(db.Integer, nullable=False)\n tls = db.Column(db.Boolean, nullable=False, default=False)\n username = db.Column(db.String(255), nullable=False)\n password = db.Column(db.String(255), nullable=False)\n keep = db.Column(db.Boolean, nullable=False, default=False)\n last_check = db.Column(db.DateTime, nullable=True)\n error = db.Column(db.String(1023), nullable=True)\n\n def __repr__(self):\n return (\n f'<Fetch #{self.id}: {self.protocol}{\"s\" if self.tls else \"\"}:'\n f'//{self.username}@{self.host}:{self.port}>'\n )\n\n\nclass MailuConfig:\n \"\"\" Class which joins whole Mailu config for dumping\n and loading\n \"\"\"\n\n class MailuCollection:\n \"\"\" Provides dict- and list-like access to instances\n of a sqlalchemy model\n \"\"\"\n\n def __init__(self, model : db.Model):\n self.model = model\n\n def __repr__(self):\n return f'<{self.model.__name__}-Collection>'\n\n @cached_property\n def _items(self):\n return {\n inspect(item).identity: item\n for item in self.model.query.all()\n }\n\n def __len__(self):\n return len(self._items)\n\n def __iter__(self):\n return iter(self._items.values())\n\n def __getitem__(self, key):\n return self._items[key]\n\n def __setitem__(self, key, item):\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items[key] = item\n\n def __delitem__(self, key):\n del self._items[key]\n\n def append(self, item, update=False):\n \"\"\" list-like append \"\"\"\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if 
key in self._items:\n if not update:\n raise ValueError(f'item {key!r} already present in collection')\n self._items[key] = item\n\n def extend(self, items, update=False):\n \"\"\" list-like extend \"\"\"\n add = {}\n for item in items:\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if not update and key in self._items:\n raise ValueError(f'item {key!r} already present in collection')\n add[key] = item\n self._items.update(add)\n\n def pop(self, *args):\n \"\"\" list-like (no args) and dict-like (1 or 2 args) pop \"\"\"\n if args:\n if len(args) > 2:\n raise TypeError(f'pop expected at most 2 arguments, got {len(args)}')\n return self._items.pop(*args)\n else:\n return self._items.popitem()[1]\n\n def popitem(self):\n \"\"\" dict-like popitem \"\"\"\n return self._items.popitem()\n\n def remove(self, item):\n \"\"\" list-like remove \"\"\"\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n key = inspect(item).identity\n if not key in self._items:\n raise ValueError(f'item {key!r} not found in collection')\n del self._items[key]\n\n def clear(self):\n \"\"\" dict-like clear \"\"\"\n while True:\n try:\n self.pop()\n except IndexError:\n break\n\n def update(self, items):\n \"\"\" dict-like update \"\"\"\n for key, item in items:\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items.update(items)\n\n def setdefault(self, key, item=None):\n \"\"\" dict-like setdefault \"\"\"\n if key in self._items:\n return self._items[key]\n if item is None:\n return None\n if not isinstance(item, self.model):\n raise TypeError(f'expected {self.model.name}')\n if key != inspect(item).identity:\n raise ValueError(f'item identity != key {key!r}')\n self._items[key] = item\n return item\n\n def __init__(self):\n\n # section-name -> attr\n self._sections = {\n name: getattr(self, name)\n for name in dir(self)\n if isinstance(getattr(self, name), self.MailuCollection)\n }\n\n # known models\n self._models = tuple(section.model for section in self._sections.values())\n\n # model -> attr\n self._sections.update({\n section.model: section for section in self._sections.values()\n })\n\n def _get_model(self, section):\n if section is None:\n return None\n model = self._sections.get(section)\n if model is None:\n raise ValueError(f'Invalid section: {section!r}')\n if isinstance(model, self.MailuCollection):\n return model.model\n return model\n\n def _add(self, items, section, update):\n\n model = self._get_model(section)\n if isinstance(items, self._models):\n items = [items]\n elif not hasattr(items, '__iter__'):\n raise ValueError(f'{items!r} is not iterable')\n\n for item in items:\n if model is not None and not isinstance(item, model):\n what = item.__class__.__name__.capitalize()\n raise ValueError(f'{what} can not be added to section {section!r}')\n self._sections[type(item)].append(item, update=update)\n\n def add(self, items, section=None):\n \"\"\" add item to config \"\"\"\n self._add(items, section, update=False)\n\n def update(self, items, section=None):\n \"\"\" add or replace item in config \"\"\"\n self._add(items, section, update=True)\n\n def remove(self, items, section=None):\n \"\"\" remove item from config \"\"\"\n model = self._get_model(section)\n if isinstance(items, self._models):\n items = [items]\n elif not hasattr(items, '__iter__'):\n raise 
ValueError(f'{items!r} is not iterable')\n\n for item in items:\n if isinstance(item, str):\n if section is None:\n raise ValueError(f'Cannot remove key {item!r} without section')\n del self._sections[model][item]\n elif model is not None and not isinstance(item, model):\n what = item.__class__.__name__.capitalize()\n raise ValueError(f'{what} can not be removed from section {section!r}')\n self._sections[type(item)].remove(item,)\n\n def clear(self, models=None):\n \"\"\" remove complete configuration \"\"\"\n for model in self._models:\n if models is None or model in models:\n db.session.query(model).delete()\n\n def check(self):\n \"\"\" check for duplicate domain names \"\"\"\n dup = set()\n for fqdn in chain(\n db.session.query(Domain.name),\n db.session.query(Alternative.name),\n db.session.query(Relay.name)\n ):\n if fqdn in dup:\n raise ValueError(f'Duplicate domain name: {fqdn}')\n dup.add(fqdn)\n\n domain = MailuCollection(Domain)\n user = MailuCollection(User)\n alias = MailuCollection(Alias)\n relay = MailuCollection(Relay)\n config = MailuCollection(Config)\n", "path": "core/admin/mailu/models.py" } ]
diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py index f30ef3870..48ce8b33f 100644 --- a/core/admin/mailu/models.py +++ b/core/admin/mailu/models.py @@ -546,8 +546,8 @@ def reply_active(self): now = date.today() return ( self.reply_enabled and - self.reply_startdate < now and - self.reply_enddate > now + self.reply_startdate <= now and + self.reply_enddate >= now ) @property diff --git a/towncrier/newsfragments/2512.bugfix b/towncrier/newsfragments/2512.bugfix new file mode 100644 index 000000000..b1b6aa99b --- /dev/null +++ b/towncrier/newsfragments/2512.bugfix @@ -0,0 +1 @@ +Fix: include start and end dates in the auto-reply period \ No newline at end of file
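For context on the one-line fix in the diff above: with strict comparisons, an auto-reply whose start or end date falls on the current day is treated as inactive, while the inclusive comparisons enable it on those boundary days. A small illustration (the dates are made up for the example; `reply_enddate` uses the model's default "no end" date):

```python
# Hypothetical dates chosen only to illustrate the boundary behaviour
# addressed by the diff above: an auto-reply configured to start today.
from datetime import date

today = date.today()
reply_startdate = today              # auto-reply begins today
reply_enddate = date(2999, 12, 31)   # default "no end" date from the model

strict = reply_startdate < today and reply_enddate > today       # False on the start date
inclusive = reply_startdate <= today and reply_enddate >= today  # True on the start date
print(strict, inclusive)  # False True
```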
adamchainz__django-mysql-398
MySQLCache.set_many should return a list of failing keys
This new behaviour in Django 2.0 (django/django#7520) should be implemented.
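Under the Django 2.0 contract (django/django#7520), `set_many` returns the list of keys that could not be stored. Below is a minimal sketch of one way a backend could satisfy that contract, assuming the backend is importable as `django_mysql.cache.MySQLCache` and that its multi-row upsert either stores every key or raises; the subclass name is hypothetical and this is not the project's actual patch:

```python
# A minimal sketch, not django-mysql's actual fix. Because MySQLCache
# writes all rows in a single INSERT ... ON DUPLICATE KEY UPDATE, the
# statement either stores every key or raises, so an empty list is the
# natural "no failing keys" return value. "ReturningMySQLCache" is a
# hypothetical name used only for this illustration.
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_mysql.cache import MySQLCache


class ReturningMySQLCache(MySQLCache):
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        super(ReturningMySQLCache, self).set_many(
            data, timeout=timeout, version=version,
        )
        return []  # every key was stored (or an exception propagated)
```

Callers can then distinguish total success (an empty list) from partial failure without issuing extra queries.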
[ { "content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport re\nimport zlib\nfrom random import random\nfrom textwrap import dedent\nfrom time import time\n\nfrom django.core.cache.backends.base import (\n DEFAULT_TIMEOUT, BaseCache, default_key_func\n)\nfrom django.db import connections, router\nfrom django.utils import six\nfrom django.utils.encoding import force_bytes\nfrom django.utils.module_loading import import_string\n\nfrom django_mysql.utils import collapse_spaces, get_list_sql\n\ntry:\n from django.utils.six.moves import cPickle as pickle\nexcept ImportError: # pragma: no cover\n import pickle\n\n\nBIGINT_SIGNED_MIN = -9223372036854775808\nBIGINT_SIGNED_MAX = 9223372036854775807\nBIGINT_UNSIGNED_MAX = 18446744073709551615\n\n\n# Slightly modified copies of Options/BaseDatabaseCache from django's\n# cache.backends.db - these allow us to act like a separate app for database\n# routers (django_mysql), and not appear on django's `createcachetable`\n# command\n\nclass Options(object):\n \"\"\"A class that will quack like a Django model _meta class.\n\n This allows cache operations to be controlled by the router\n \"\"\"\n def __init__(self, table):\n self.db_table = table\n self.app_label = 'django_mysql'\n self.model_name = 'cacheentry'\n self.verbose_name = 'cache entry'\n self.verbose_name_plural = 'cache entries'\n self.object_name = 'CacheEntry'\n self.abstract = False\n self.managed = True\n self.proxy = False\n self.swapped = False\n\n\nclass BaseDatabaseCache(BaseCache):\n def __init__(self, table, params):\n super(BaseDatabaseCache, self).__init__(params)\n self._table = table\n\n class CacheEntry(object):\n _meta = Options(table)\n self.cache_model_class = CacheEntry\n\n\nreverse_key_re = re.compile(r'^([^:]*):(\\d+):(.*)')\n\n\ndef default_reverse_key_func(full_key):\n \"\"\"\n Reverse of Django's default_key_func, i.e. undoing:\n\n def default_key_func(key, key_prefix, version):\n return '%s:%s:%s' % (key_prefix, version, key)\n \"\"\"\n match = reverse_key_re.match(full_key)\n return match.group(3), match.group(1), int(match.group(2))\n\n\ndef get_reverse_key_func(reverse_key_func):\n \"\"\"\n Function to decide which reverse key function to use\n\n Defaults to ``None``, as any other value might not apply to the given\n KEY_FUNCTION. 
Also the user may not use any of the operations that require\n reversing the key_func.\n \"\"\"\n if reverse_key_func is not None:\n if callable(reverse_key_func):\n return reverse_key_func\n else:\n return import_string(reverse_key_func)\n return None\n\n\nclass MySQLCache(BaseDatabaseCache):\n\n # Got an error with the add() query using BIGINT_UNSIGNED_MAX, so use a\n # value slightly 1 bit less (still an incalculable time into the future of\n # 1970)\n FOREVER_TIMEOUT = BIGINT_UNSIGNED_MAX >> 1\n\n create_table_sql = dedent('''\\\n CREATE TABLE `{table_name}` (\n cache_key varchar(255) CHARACTER SET utf8 COLLATE utf8_bin\n NOT NULL PRIMARY KEY,\n value longblob NOT NULL,\n value_type char(1) CHARACTER SET latin1 COLLATE latin1_bin\n NOT NULL DEFAULT 'p',\n expires BIGINT UNSIGNED NOT NULL\n );\n ''')\n\n @classmethod\n def _now(cls):\n # Values in the expires column are milliseconds since unix epoch (UTC)\n return int(time() * 1000)\n\n def __init__(self, table, params):\n super(MySQLCache, self).__init__(table, params)\n options = params.get('OPTIONS', {})\n self._compress_min_length = options.get('COMPRESS_MIN_LENGTH', 5000)\n self._compress_level = options.get('COMPRESS_LEVEL', 6)\n self._cull_probability = options.get('CULL_PROBABILITY', 0.01)\n\n # Figure out our *reverse* key function\n if self.key_func is default_key_func:\n self.reverse_key_func = default_reverse_key_func\n if ':' in self.key_prefix:\n raise ValueError(\n \"Cannot use the default KEY_FUNCTION and \"\n \"REVERSE_KEY_FUNCTION if you have a colon in your \"\n \"KEY_PREFIX.\"\n )\n else:\n reverse_key_func = params.get('REVERSE_KEY_FUNCTION', None)\n self.reverse_key_func = get_reverse_key_func(reverse_key_func)\n\n # Django API + helpers\n\n def get(self, key, default=None, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._get_query.format(table=table),\n (key, self._now())\n )\n row = cursor.fetchone()\n\n if row is None:\n return default\n else:\n value, value_type = row\n return self.decode(value, value_type)\n\n _get_query = collapse_spaces(\"\"\"\n SELECT value, value_type\n FROM {table}\n WHERE cache_key = %s AND\n expires >= %s\n \"\"\")\n\n def get_many(self, keys, version=None):\n made_key_to_key = {\n self.make_key(key, version=version): key\n for key in keys\n }\n made_keys = list(made_key_to_key.keys())\n for key in made_keys:\n self.validate_key(key)\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._get_many_query.format(\n table=table,\n list_sql=get_list_sql(made_keys)\n ),\n made_keys + [self._now()]\n )\n rows = cursor.fetchall()\n\n data = {}\n\n for made_key, value, value_type in rows:\n key = made_key_to_key[made_key]\n data[key] = self.decode(value, value_type)\n\n return data\n\n _get_many_query = collapse_spaces(\"\"\"\n SELECT cache_key, value, value_type\n FROM {table}\n WHERE cache_key IN {list_sql} AND\n expires >= %s\n \"\"\")\n\n def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n self._base_set('set', key, value, timeout)\n\n def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n return 
self._base_set('add', key, value, timeout)\n\n def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):\n exp = self.get_backend_timeout(timeout)\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n self._maybe_cull()\n with connections[db].cursor() as cursor:\n\n value, value_type = self.encode(value)\n\n if mode == 'set':\n query = self._set_query\n params = (key, value, value_type, exp)\n elif mode == 'add':\n query = self._add_query\n params = (key, value, value_type, exp, self._now())\n\n cursor.execute(query.format(table=table), params)\n\n if mode == 'set':\n return True\n elif mode == 'add':\n # Use a special code in the add query for \"did insert\"\n insert_id = cursor.lastrowid\n return (insert_id != 444)\n\n _set_many_query = collapse_spaces(\"\"\"\n INSERT INTO {table} (cache_key, value, value_type, expires)\n VALUES {{VALUES_CLAUSE}}\n ON DUPLICATE KEY UPDATE\n value=VALUES(value),\n value_type=VALUES(value_type),\n expires=VALUES(expires)\n \"\"\")\n\n _set_query = _set_many_query.replace('{{VALUES_CLAUSE}}',\n '(%s, %s, %s, %s)')\n\n # Uses the IFNULL / LEAST / LAST_INSERT_ID trick to communicate the special\n # value of 444 back to the client (LAST_INSERT_ID is otherwise 0, since\n # there is no AUTO_INCREMENT column)\n _add_query = collapse_spaces(\"\"\"\n INSERT INTO {table} (cache_key, value, value_type, expires)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n value=IF(expires > @tmp_now:=%s, value, VALUES(value)),\n value_type=IF(expires > @tmp_now, value_type, VALUES(value_type)),\n expires=IF(\n expires > @tmp_now,\n IFNULL(\n LEAST(LAST_INSERT_ID(444), NULL),\n expires\n ),\n VALUES(expires)\n )\n \"\"\")\n\n def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):\n exp = self.get_backend_timeout(timeout)\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n self._maybe_cull()\n\n params = []\n for key, value in six.iteritems(data):\n made_key = self.make_key(key, version=version)\n self.validate_key(made_key)\n value, value_type = self.encode(value)\n params.extend((made_key, value, value_type, exp))\n\n query = self._set_many_query.replace(\n '{{VALUES_CLAUSE}}',\n ','.join('(%s, %s, %s, %s)' for key in data)\n ).format(table=table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(query, params)\n\n def delete(self, key, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(self._delete_query.format(table=table), (key,))\n\n _delete_query = collapse_spaces(\"\"\"\n DELETE FROM {table}\n WHERE cache_key = %s\n \"\"\")\n\n def delete_many(self, keys, version=None):\n made_keys = [self.make_key(key, version=version) for key in keys]\n for key in made_keys:\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._delete_many_query.format(\n table=table,\n list_sql=get_list_sql(made_keys),\n ),\n made_keys\n )\n\n _delete_many_query = collapse_spaces(\"\"\"\n DELETE FROM {table}\n WHERE cache_key IN {list_sql}\n \"\"\")\n\n def has_key(self, key, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = router.db_for_read(self.cache_model_class)\n table 
= connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._has_key_query.format(table=table),\n (key, self._now())\n )\n return cursor.fetchone() is not None\n\n _has_key_query = collapse_spaces(\"\"\"\n SELECT 1 FROM {table}\n WHERE cache_key = %s and expires > %s\n \"\"\")\n\n def incr(self, key, delta=1, version=None):\n return self._base_delta(key, delta, version, '+')\n\n def decr(self, key, delta=1, version=None):\n return self._base_delta(key, delta, version, '-')\n\n def _base_delta(self, key, delta, version, operation):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n updated = cursor.execute(\n self._delta_query.format(table=table, operation=operation),\n (delta, key)\n )\n\n if not updated:\n raise ValueError(\"Key '%s' not found, or not an integer\" % key)\n\n # New value stored in insert_id\n return cursor.lastrowid\n\n # Looks a bit tangled to turn the blob back into an int for updating, but\n # it works. Stores the new value for insert_id() with LAST_INSERT_ID\n _delta_query = collapse_spaces(\"\"\"\n UPDATE {table}\n SET value = LAST_INSERT_ID(\n CAST(value AS SIGNED INTEGER)\n {operation}\n %s\n )\n WHERE cache_key = %s AND\n value_type = 'i'\n \"\"\")\n\n def clear(self):\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n with connections[db].cursor() as cursor:\n cursor.execute(\"DELETE FROM {table}\".format(table=table))\n\n def validate_key(self, key):\n \"\"\"\n Django normally warns about maximum key length, but we error on it.\n \"\"\"\n if len(key) > 250:\n raise ValueError(\n \"Cache key is longer than the maxmimum 250 characters: {}\"\n .format(key)\n )\n return super(MySQLCache, self).validate_key(key)\n\n def encode(self, obj):\n \"\"\"\n Take a Python object and return it as a tuple (value, value_type), a\n blob and a one-char code for what type it is\n \"\"\"\n if self._is_valid_mysql_bigint(obj):\n return obj, 'i'\n\n value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n value_type = 'p'\n if (\n self._compress_min_length and\n len(value) >= self._compress_min_length\n ):\n value = zlib.compress(value, self._compress_level)\n value_type = 'z'\n return value, value_type\n\n def _is_valid_mysql_bigint(self, value):\n return(\n # Can't support int/long subclasses since they should are expected\n # to decode back to the same object\n (type(value) in six.integer_types) and\n # Can't go beyond these ranges\n BIGINT_SIGNED_MIN <= value <= BIGINT_SIGNED_MAX\n )\n\n def decode(self, value, value_type):\n \"\"\"\n Take a value blob and its value_type one-char code and convert it back\n to a python object\n \"\"\"\n if value_type == 'i':\n return int(value)\n\n if value_type == 'z':\n value = zlib.decompress(value)\n value_type = 'p'\n\n if value_type == 'p':\n return pickle.loads(force_bytes(value))\n\n raise ValueError(\n \"Unknown value_type '{}' read from the cache table.\"\n .format(value_type)\n )\n\n def _maybe_cull(self):\n # Roll the dice, if it says yes then cull\n if self._cull_probability and random() <= self._cull_probability:\n self.cull()\n\n def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):\n if timeout is None:\n return self.FOREVER_TIMEOUT\n timeout = super(MySQLCache, self).get_backend_timeout(timeout)\n return int(timeout * 1000)\n\n # Our API extensions\n\n def 
keys_with_prefix(self, prefix, version=None):\n if self.reverse_key_func is None:\n raise ValueError(\n \"To use the _with_prefix commands with a custom KEY_FUNCTION, \"\n \"you need to specify a custom REVERSE_KEY_FUNCTION too.\"\n )\n\n if version is None:\n version = self.version\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT cache_key FROM {table}\n WHERE cache_key LIKE %s AND\n expires >= %s\"\"\".format(table=table),\n (prefix, self._now())\n )\n rows = cursor.fetchall()\n full_keys = {row[0] for row in rows}\n\n keys = {}\n for full_key in full_keys:\n key, key_prefix, key_version = self.reverse_key_func(full_key)\n\n if key_version == version:\n keys[key] = key_version\n return set(six.iterkeys(keys))\n\n def get_with_prefix(self, prefix, version=None):\n if self.reverse_key_func is None:\n raise ValueError(\n \"To use the _with_prefix commands with a custom KEY_FUNCTION, \"\n \"you need to specify a custom REVERSE_KEY_FUNCTION too.\"\n )\n\n if version is None:\n version = self.version\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n version = six.text_type(version)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT cache_key, value, value_type\n FROM {table}\n WHERE cache_key LIKE %s AND\n expires >= %s\"\"\".format(table=table),\n (prefix, self._now())\n )\n rows = cursor.fetchall()\n\n data = {}\n for made_key, value, value_type in rows:\n key, key_prefix, key_version = self.reverse_key_func(made_key)\n data[key] = self.decode(value, value_type)\n\n return data\n\n def delete_with_prefix(self, prefix, version=None):\n if version is None:\n version = self.version\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n\n with connections[db].cursor() as cursor:\n return cursor.execute(\n \"\"\"DELETE FROM {table}\n WHERE cache_key LIKE %s\"\"\".format(table=table),\n (prefix,)\n )\n\n def cull(self):\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n # First, try just deleting expired keys\n num_deleted = cursor.execute(\n \"DELETE FROM {table} WHERE expires < %s\".format(table=table),\n (self._now(),)\n )\n\n # -1 means \"Don't limit size\"\n if self._max_entries == -1:\n return\n\n cursor.execute(\"SELECT COUNT(*) FROM {table}\".format(table=table))\n num = cursor.fetchone()[0]\n\n if num < self._max_entries:\n return num_deleted\n\n # Now do a key-based cull\n if self._cull_frequency == 0:\n num_deleted += cursor.execute(\n \"DELETE FROM {table}\".format(table=table)\n )\n else:\n cull_num = num // self._cull_frequency\n cursor.execute(\n \"\"\"SELECT cache_key FROM {table}\n ORDER BY cache_key\n LIMIT 1 OFFSET %s\"\"\".format(table=table),\n (cull_num,)\n )\n max_key = cursor.fetchone()[0]\n num_deleted += cursor.execute(\n \"\"\"DELETE FROM {table}\n WHERE cache_key < %s\"\"\".format(table=table),\n (max_key,)\n )\n return num_deleted\n", "path": "django_mysql/cache.py" } ]
[ { "content": "# -*- coding:utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nimport re\nimport zlib\nfrom random import random\nfrom textwrap import dedent\nfrom time import time\n\nfrom django.core.cache.backends.base import (\n DEFAULT_TIMEOUT, BaseCache, default_key_func\n)\nfrom django.db import connections, router\nfrom django.utils import six\nfrom django.utils.encoding import force_bytes\nfrom django.utils.module_loading import import_string\n\nfrom django_mysql.utils import collapse_spaces, get_list_sql\n\ntry:\n from django.utils.six.moves import cPickle as pickle\nexcept ImportError: # pragma: no cover\n import pickle\n\n\nBIGINT_SIGNED_MIN = -9223372036854775808\nBIGINT_SIGNED_MAX = 9223372036854775807\nBIGINT_UNSIGNED_MAX = 18446744073709551615\n\n\n# Slightly modified copies of Options/BaseDatabaseCache from django's\n# cache.backends.db - these allow us to act like a separate app for database\n# routers (django_mysql), and not appear on django's `createcachetable`\n# command\n\nclass Options(object):\n \"\"\"A class that will quack like a Django model _meta class.\n\n This allows cache operations to be controlled by the router\n \"\"\"\n def __init__(self, table):\n self.db_table = table\n self.app_label = 'django_mysql'\n self.model_name = 'cacheentry'\n self.verbose_name = 'cache entry'\n self.verbose_name_plural = 'cache entries'\n self.object_name = 'CacheEntry'\n self.abstract = False\n self.managed = True\n self.proxy = False\n self.swapped = False\n\n\nclass BaseDatabaseCache(BaseCache):\n def __init__(self, table, params):\n super(BaseDatabaseCache, self).__init__(params)\n self._table = table\n\n class CacheEntry(object):\n _meta = Options(table)\n self.cache_model_class = CacheEntry\n\n\nreverse_key_re = re.compile(r'^([^:]*):(\\d+):(.*)')\n\n\ndef default_reverse_key_func(full_key):\n \"\"\"\n Reverse of Django's default_key_func, i.e. undoing:\n\n def default_key_func(key, key_prefix, version):\n return '%s:%s:%s' % (key_prefix, version, key)\n \"\"\"\n match = reverse_key_re.match(full_key)\n return match.group(3), match.group(1), int(match.group(2))\n\n\ndef get_reverse_key_func(reverse_key_func):\n \"\"\"\n Function to decide which reverse key function to use\n\n Defaults to ``None``, as any other value might not apply to the given\n KEY_FUNCTION. 
Also the user may not use any of the operations that require\n reversing the key_func.\n \"\"\"\n if reverse_key_func is not None:\n if callable(reverse_key_func):\n return reverse_key_func\n else:\n return import_string(reverse_key_func)\n return None\n\n\nclass MySQLCache(BaseDatabaseCache):\n\n # Got an error with the add() query using BIGINT_UNSIGNED_MAX, so use a\n # value slightly 1 bit less (still an incalculable time into the future of\n # 1970)\n FOREVER_TIMEOUT = BIGINT_UNSIGNED_MAX >> 1\n\n create_table_sql = dedent('''\\\n CREATE TABLE `{table_name}` (\n cache_key varchar(255) CHARACTER SET utf8 COLLATE utf8_bin\n NOT NULL PRIMARY KEY,\n value longblob NOT NULL,\n value_type char(1) CHARACTER SET latin1 COLLATE latin1_bin\n NOT NULL DEFAULT 'p',\n expires BIGINT UNSIGNED NOT NULL\n );\n ''')\n\n @classmethod\n def _now(cls):\n # Values in the expires column are milliseconds since unix epoch (UTC)\n return int(time() * 1000)\n\n def __init__(self, table, params):\n super(MySQLCache, self).__init__(table, params)\n options = params.get('OPTIONS', {})\n self._compress_min_length = options.get('COMPRESS_MIN_LENGTH', 5000)\n self._compress_level = options.get('COMPRESS_LEVEL', 6)\n self._cull_probability = options.get('CULL_PROBABILITY', 0.01)\n\n # Figure out our *reverse* key function\n if self.key_func is default_key_func:\n self.reverse_key_func = default_reverse_key_func\n if ':' in self.key_prefix:\n raise ValueError(\n \"Cannot use the default KEY_FUNCTION and \"\n \"REVERSE_KEY_FUNCTION if you have a colon in your \"\n \"KEY_PREFIX.\"\n )\n else:\n reverse_key_func = params.get('REVERSE_KEY_FUNCTION', None)\n self.reverse_key_func = get_reverse_key_func(reverse_key_func)\n\n # Django API + helpers\n\n def get(self, key, default=None, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._get_query.format(table=table),\n (key, self._now())\n )\n row = cursor.fetchone()\n\n if row is None:\n return default\n else:\n value, value_type = row\n return self.decode(value, value_type)\n\n _get_query = collapse_spaces(\"\"\"\n SELECT value, value_type\n FROM {table}\n WHERE cache_key = %s AND\n expires >= %s\n \"\"\")\n\n def get_many(self, keys, version=None):\n made_key_to_key = {\n self.make_key(key, version=version): key\n for key in keys\n }\n made_keys = list(made_key_to_key.keys())\n for key in made_keys:\n self.validate_key(key)\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._get_many_query.format(\n table=table,\n list_sql=get_list_sql(made_keys)\n ),\n made_keys + [self._now()]\n )\n rows = cursor.fetchall()\n\n data = {}\n\n for made_key, value, value_type in rows:\n key = made_key_to_key[made_key]\n data[key] = self.decode(value, value_type)\n\n return data\n\n _get_many_query = collapse_spaces(\"\"\"\n SELECT cache_key, value, value_type\n FROM {table}\n WHERE cache_key IN {list_sql} AND\n expires >= %s\n \"\"\")\n\n def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n self._base_set('set', key, value, timeout)\n\n def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n return 
self._base_set('add', key, value, timeout)\n\n def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):\n exp = self.get_backend_timeout(timeout)\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n self._maybe_cull()\n with connections[db].cursor() as cursor:\n\n value, value_type = self.encode(value)\n\n if mode == 'set':\n query = self._set_query\n params = (key, value, value_type, exp)\n elif mode == 'add':\n query = self._add_query\n params = (key, value, value_type, exp, self._now())\n\n cursor.execute(query.format(table=table), params)\n\n if mode == 'set':\n return True\n elif mode == 'add':\n # Use a special code in the add query for \"did insert\"\n insert_id = cursor.lastrowid\n return (insert_id != 444)\n\n _set_many_query = collapse_spaces(\"\"\"\n INSERT INTO {table} (cache_key, value, value_type, expires)\n VALUES {{VALUES_CLAUSE}}\n ON DUPLICATE KEY UPDATE\n value=VALUES(value),\n value_type=VALUES(value_type),\n expires=VALUES(expires)\n \"\"\")\n\n _set_query = _set_many_query.replace('{{VALUES_CLAUSE}}',\n '(%s, %s, %s, %s)')\n\n # Uses the IFNULL / LEAST / LAST_INSERT_ID trick to communicate the special\n # value of 444 back to the client (LAST_INSERT_ID is otherwise 0, since\n # there is no AUTO_INCREMENT column)\n _add_query = collapse_spaces(\"\"\"\n INSERT INTO {table} (cache_key, value, value_type, expires)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n value=IF(expires > @tmp_now:=%s, value, VALUES(value)),\n value_type=IF(expires > @tmp_now, value_type, VALUES(value_type)),\n expires=IF(\n expires > @tmp_now,\n IFNULL(\n LEAST(LAST_INSERT_ID(444), NULL),\n expires\n ),\n VALUES(expires)\n )\n \"\"\")\n\n def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):\n exp = self.get_backend_timeout(timeout)\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n self._maybe_cull()\n\n params = []\n for key, value in six.iteritems(data):\n made_key = self.make_key(key, version=version)\n self.validate_key(made_key)\n value, value_type = self.encode(value)\n params.extend((made_key, value, value_type, exp))\n\n query = self._set_many_query.replace(\n '{{VALUES_CLAUSE}}',\n ','.join('(%s, %s, %s, %s)' for key in data)\n ).format(table=table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(query, params)\n return []\n\n def delete(self, key, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(self._delete_query.format(table=table), (key,))\n\n _delete_query = collapse_spaces(\"\"\"\n DELETE FROM {table}\n WHERE cache_key = %s\n \"\"\")\n\n def delete_many(self, keys, version=None):\n made_keys = [self.make_key(key, version=version) for key in keys]\n for key in made_keys:\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._delete_many_query.format(\n table=table,\n list_sql=get_list_sql(made_keys),\n ),\n made_keys\n )\n\n _delete_many_query = collapse_spaces(\"\"\"\n DELETE FROM {table}\n WHERE cache_key IN {list_sql}\n \"\"\")\n\n def has_key(self, key, version=None):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = 
router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n self._has_key_query.format(table=table),\n (key, self._now())\n )\n return cursor.fetchone() is not None\n\n _has_key_query = collapse_spaces(\"\"\"\n SELECT 1 FROM {table}\n WHERE cache_key = %s and expires > %s\n \"\"\")\n\n def incr(self, key, delta=1, version=None):\n return self._base_delta(key, delta, version, '+')\n\n def decr(self, key, delta=1, version=None):\n return self._base_delta(key, delta, version, '-')\n\n def _base_delta(self, key, delta, version, operation):\n key = self.make_key(key, version=version)\n self.validate_key(key)\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n updated = cursor.execute(\n self._delta_query.format(table=table, operation=operation),\n (delta, key)\n )\n\n if not updated:\n raise ValueError(\"Key '%s' not found, or not an integer\" % key)\n\n # New value stored in insert_id\n return cursor.lastrowid\n\n # Looks a bit tangled to turn the blob back into an int for updating, but\n # it works. Stores the new value for insert_id() with LAST_INSERT_ID\n _delta_query = collapse_spaces(\"\"\"\n UPDATE {table}\n SET value = LAST_INSERT_ID(\n CAST(value AS SIGNED INTEGER)\n {operation}\n %s\n )\n WHERE cache_key = %s AND\n value_type = 'i'\n \"\"\")\n\n def clear(self):\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n with connections[db].cursor() as cursor:\n cursor.execute(\"DELETE FROM {table}\".format(table=table))\n\n def validate_key(self, key):\n \"\"\"\n Django normally warns about maximum key length, but we error on it.\n \"\"\"\n if len(key) > 250:\n raise ValueError(\n \"Cache key is longer than the maxmimum 250 characters: {}\"\n .format(key)\n )\n return super(MySQLCache, self).validate_key(key)\n\n def encode(self, obj):\n \"\"\"\n Take a Python object and return it as a tuple (value, value_type), a\n blob and a one-char code for what type it is\n \"\"\"\n if self._is_valid_mysql_bigint(obj):\n return obj, 'i'\n\n value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n value_type = 'p'\n if (\n self._compress_min_length and\n len(value) >= self._compress_min_length\n ):\n value = zlib.compress(value, self._compress_level)\n value_type = 'z'\n return value, value_type\n\n def _is_valid_mysql_bigint(self, value):\n return(\n # Can't support int/long subclasses since they should are expected\n # to decode back to the same object\n (type(value) in six.integer_types) and\n # Can't go beyond these ranges\n BIGINT_SIGNED_MIN <= value <= BIGINT_SIGNED_MAX\n )\n\n def decode(self, value, value_type):\n \"\"\"\n Take a value blob and its value_type one-char code and convert it back\n to a python object\n \"\"\"\n if value_type == 'i':\n return int(value)\n\n if value_type == 'z':\n value = zlib.decompress(value)\n value_type = 'p'\n\n if value_type == 'p':\n return pickle.loads(force_bytes(value))\n\n raise ValueError(\n \"Unknown value_type '{}' read from the cache table.\"\n .format(value_type)\n )\n\n def _maybe_cull(self):\n # Roll the dice, if it says yes then cull\n if self._cull_probability and random() <= self._cull_probability:\n self.cull()\n\n def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):\n if timeout is None:\n return self.FOREVER_TIMEOUT\n timeout = super(MySQLCache, self).get_backend_timeout(timeout)\n return 
int(timeout * 1000)\n\n # Our API extensions\n\n def keys_with_prefix(self, prefix, version=None):\n if self.reverse_key_func is None:\n raise ValueError(\n \"To use the _with_prefix commands with a custom KEY_FUNCTION, \"\n \"you need to specify a custom REVERSE_KEY_FUNCTION too.\"\n )\n\n if version is None:\n version = self.version\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT cache_key FROM {table}\n WHERE cache_key LIKE %s AND\n expires >= %s\"\"\".format(table=table),\n (prefix, self._now())\n )\n rows = cursor.fetchall()\n full_keys = {row[0] for row in rows}\n\n keys = {}\n for full_key in full_keys:\n key, key_prefix, key_version = self.reverse_key_func(full_key)\n\n if key_version == version:\n keys[key] = key_version\n return set(six.iterkeys(keys))\n\n def get_with_prefix(self, prefix, version=None):\n if self.reverse_key_func is None:\n raise ValueError(\n \"To use the _with_prefix commands with a custom KEY_FUNCTION, \"\n \"you need to specify a custom REVERSE_KEY_FUNCTION too.\"\n )\n\n if version is None:\n version = self.version\n\n db = router.db_for_read(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n version = six.text_type(version)\n\n with connections[db].cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT cache_key, value, value_type\n FROM {table}\n WHERE cache_key LIKE %s AND\n expires >= %s\"\"\".format(table=table),\n (prefix, self._now())\n )\n rows = cursor.fetchall()\n\n data = {}\n for made_key, value, value_type in rows:\n key, key_prefix, key_version = self.reverse_key_func(made_key)\n data[key] = self.decode(value, value_type)\n\n return data\n\n def delete_with_prefix(self, prefix, version=None):\n if version is None:\n version = self.version\n\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n prefix = self.make_key(prefix + '%', version=version)\n\n with connections[db].cursor() as cursor:\n return cursor.execute(\n \"\"\"DELETE FROM {table}\n WHERE cache_key LIKE %s\"\"\".format(table=table),\n (prefix,)\n )\n\n def cull(self):\n db = router.db_for_write(self.cache_model_class)\n table = connections[db].ops.quote_name(self._table)\n\n with connections[db].cursor() as cursor:\n # First, try just deleting expired keys\n num_deleted = cursor.execute(\n \"DELETE FROM {table} WHERE expires < %s\".format(table=table),\n (self._now(),)\n )\n\n # -1 means \"Don't limit size\"\n if self._max_entries == -1:\n return\n\n cursor.execute(\"SELECT COUNT(*) FROM {table}\".format(table=table))\n num = cursor.fetchone()[0]\n\n if num < self._max_entries:\n return num_deleted\n\n # Now do a key-based cull\n if self._cull_frequency == 0:\n num_deleted += cursor.execute(\n \"DELETE FROM {table}\".format(table=table)\n )\n else:\n cull_num = num // self._cull_frequency\n cursor.execute(\n \"\"\"SELECT cache_key FROM {table}\n ORDER BY cache_key\n LIMIT 1 OFFSET %s\"\"\".format(table=table),\n (cull_num,)\n )\n max_key = cursor.fetchone()[0]\n num_deleted += cursor.execute(\n \"\"\"DELETE FROM {table}\n WHERE cache_key < %s\"\"\".format(table=table),\n (max_key,)\n )\n return num_deleted\n", "path": "django_mysql/cache.py" } ]
diff --git a/HISTORY.rst b/HISTORY.rst index 671dd6cf..44452077 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -12,6 +12,8 @@ Pending ------------------ * Changed subprocess imports for compatibility with Google App Engine. +* (Insert new release notes below this line) +* Made ``MySQLCache.set_many`` return a list as per Django 2.0. 2.1.0 (2017-06-11) ------------------ diff --git a/django_mysql/cache.py b/django_mysql/cache.py index 77096f4a..eadae2d7 100644 --- a/django_mysql/cache.py +++ b/django_mysql/cache.py @@ -289,6 +289,7 @@ def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): with connections[db].cursor() as cursor: cursor.execute(query, params) + return [] def delete(self, key, version=None): key = self.make_key(key, version=version) diff --git a/tests/testapp/test_cache.py b/tests/testapp/test_cache.py index fb6adb20..be9af6b6 100644 --- a/tests/testapp/test_cache.py +++ b/tests/testapp/test_cache.py @@ -846,11 +846,16 @@ def test_set_many(self): caches['no_cull'].get('nonexistent') with self.assertNumQueries(1): - caches['no_cull'].set_many({"key1": "spam"}) + result = caches['no_cull'].set_many({"key1": "spam"}) + assert result == [] # Multiple keys can be set using set_many with self.assertNumQueries(1): - caches['no_cull'].set_many({"key1": "spam", "key2": "eggs"}) + result = caches['no_cull'].set_many({ + 'key1': 'spam', + 'key2': 'eggs', + }) + assert result == [] assert cache.get("key1") == "spam" assert cache.get("key2") == "eggs"
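The patch above makes `MySQLCache.set_many` return an empty list, matching the Django 2.0 cache API in which `set_many` returns the keys that could not be stored. A minimal usage sketch (assuming a Django project whose `CACHES["default"]` entry points at `django_mysql.cache.MySQLCache`; the cache alias and key names are illustrative):

```python
from django.core.cache import caches

cache = caches["default"]  # backed by django_mysql.cache.MySQLCache

# Django 2.0 expects set_many() to return the keys that failed to be set.
# This backend writes every row in one INSERT ... ON DUPLICATE KEY UPDATE,
# so no key can fail individually and the result is always an empty list.
failed = cache.set_many({"key1": "spam", "key2": "eggs"}, timeout=60)
assert failed == []
assert cache.get("key1") == "spam"
```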
python__python-docs-es-1201
readthedocs: 'extensions' is not defined

For some reason we have run into https://github.com/UPC/ravada/issues/890 in the readthedocs CI, and the builds currently fail with the following error:

```
% python -m sphinx -T -j auto -E -b html -d _build/doctrees -D language=es . _build/html
Running Sphinx v2.2.0

Traceback (most recent call last):
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
    execfile_(filename, namespace)
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
    exec(code, _globals)
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
    from conf import *
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
    if extensions:
NameError: name 'extensions' is not defined

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/cmd/build.py", line 272, in build_main
    app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/application.py", line 210, in __init__
    self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 196, in read
    namespace = eval_config_file(filename, tags)
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 371, in eval_config_file
    raise ConfigError(msg % traceback.format_exc())
sphinx.errors.ConfigError: There is a programmable error in your configuration file:

Traceback (most recent call last):
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
    execfile_(filename, namespace)
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
    exec(code, _globals)
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
    from conf import *
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
    if extensions:
NameError: name 'extensions' is not defined


Configuration error:
There is a programmable error in your configuration file:

Traceback (most recent call last):
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
    execfile_(filename, namespace)
  File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
    exec(code, _globals)
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
    from conf import *
  File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
    if extensions:
NameError: name 'extensions' is not defined
```

Locally, `extensions` is defined, but for some reason it is not in the readthedocs CI.
[ { "content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Español'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\nhtml_short_title = f'Documentación {release}'\nhtml_title = f'Documentación de Python en Español -- {release}'\n\nexclude_patterns = [\n # This file is not included and it not marked as :orphan:\n 'distutils/_setuptools_disclaimer.rst',\n 'README.rst',\n]\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n from pathlib import Path\n overrides_paths = Path('.overrides')\n\n for path in overrides_paths.glob('**/*.*'):\n if path.name == 'README.rst' and path.parent == '.overrides':\n continue\n destroot = str(path.parent).replace('.overrides', '').lstrip('/')\n outputdir = Path('cpython/Doc') / destroot / path.name\n os.system(f'ln -nfs `pwd`/{path.parent}/{path.name} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentación de Python en Español',\n _stdauthor, 'manual'),\n]\n\nextensions.extend([\n 'sphinx_tabs.tabs',\n 'sphinxemoji.sphinxemoji',\n])\n\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \\\n f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. 
' \\\n 'Ayuda a acercar Python a más personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n\n # Import the sphinx-autorun manually to avoid this warning\n # TODO: Remove this code and use just ``extensions.append('sphinx_autorun')`` when\n # that issue gets fixed\n # See https://github.com/WhyNotHugo/sphinx-autorun/issues/17\n\n # WARNING: the sphinx_autorun extension does not declare if it is safe for\n # parallel reading, assuming it isn't - please ask the extension author to\n # check and make it explicit\n # WARNING: doing serial read\n from sphinx_autorun import RunBlock, AutoRun\n app.add_directive('runblock', RunBlock)\n app.connect('builder-inited', AutoRun.builder_init)\n app.add_config_value('autorun_languages', AutoRun.config, 'env')\n return {\n 'version': '0.1',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "conf.py" } ]
[ { "content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Español'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\nhtml_short_title = f'Documentación {release}'\nhtml_title = f'Documentación de Python en Español -- {release}'\n\nexclude_patterns = [\n # This file is not included and it not marked as :orphan:\n 'distutils/_setuptools_disclaimer.rst',\n 'README.rst',\n]\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n from pathlib import Path\n overrides_paths = Path('.overrides')\n\n for path in overrides_paths.glob('**/*.*'):\n if path.name == 'README.rst' and path.parent == '.overrides':\n continue\n destroot = str(path.parent).replace('.overrides', '').lstrip('/')\n outputdir = Path('cpython/Doc') / destroot / path.name\n os.system(f'ln -nfs `pwd`/{path.parent}/{path.name} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentación de Python en Español',\n _stdauthor, 'manual'),\n]\n\ntry:\n extensions.extend([\n 'sphinx_tabs.tabs',\n 'sphinxemoji.sphinxemoji',\n ])\nexcept NameError:\n extensions = [\n 'sphinx_tabs.tabs',\n 'sphinxemoji.sphinxemoji',\n ]\n\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \\\n f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. 
' \\\n 'Ayuda a acercar Python a más personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n\n # Import the sphinx-autorun manually to avoid this warning\n # TODO: Remove this code and use just ``extensions.append('sphinx_autorun')`` when\n # that issue gets fixed\n # See https://github.com/WhyNotHugo/sphinx-autorun/issues/17\n\n # WARNING: the sphinx_autorun extension does not declare if it is safe for\n # parallel reading, assuming it isn't - please ask the extension author to\n # check and make it explicit\n # WARNING: doing serial read\n from sphinx_autorun import RunBlock, AutoRun\n app.add_directive('runblock', RunBlock)\n app.connect('builder-inited', AutoRun.builder_init)\n app.add_config_value('autorun_languages', AutoRun.config, 'env')\n return {\n 'version': '0.1',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "conf.py" } ]
diff --git a/conf.py b/conf.py index 8ecc6c9d56..8956b12c0a 100644 --- a/conf.py +++ b/conf.py @@ -69,10 +69,16 @@ _stdauthor, 'manual'), ] -extensions.extend([ - 'sphinx_tabs.tabs', - 'sphinxemoji.sphinxemoji', -]) +try: + extensions.extend([ + 'sphinx_tabs.tabs', + 'sphinxemoji.sphinxemoji', + ]) +except NameError: + extensions = [ + 'sphinx_tabs.tabs', + 'sphinxemoji.sphinxemoji', + ] def setup(app):
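The diff above wraps `extensions.extend(...)` in a `try`/`except NameError` so the build no longer depends on the star-import of the upstream `cpython/Doc/conf.py` having defined `extensions`. The same guard, shown standalone as a hedged sketch (`_extra_extensions` is an illustrative name; the extension list mirrors the diff):

```python
# Works whether or not `from conf import *` injected an `extensions` list
# into this module's namespace (it did not on Read the Docs, per the issue).
_extra_extensions = ['sphinx_tabs.tabs', 'sphinxemoji.sphinxemoji']

try:
    extensions.extend(_extra_extensions)    # upstream conf defined it
except NameError:
    extensions = list(_extra_extensions)    # define it ourselves
```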
chainer__chainer-1319
Is it possible to import caffe model on Python 3? As stated in the documentation, `chainer.functions.caffe.CaffeFunction` only supports Python 2.7. However in the "Install Chainer" section, it says ``` Caffe model support Protocol Buffers protobuf>=3.0.0 is required for Py3 ``` Thus I am curious whether caffe model import is supported in Python 3. Thank you very much for your help.
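The loader shown below suggests the short answer: on Python 3 the Caffe importer only works when protobuf>=3 is installed, while protobuf 2 limits it to Python 2.7. A quick environment check, as a hedged sketch (it relies on the module-level `available` flag defined in `chainer/links/caffe/caffe_function.py` below):

```python
import sys

from chainer.links.caffe import caffe_function

# `available` is True when protobuf>=3 is installed (any Python version) or
# when running Python 2, where the bundled caffe_pb2 module still works.
print('Python %d.%d' % sys.version_info[:2],
      '- CaffeFunction available:', caffe_function.available)

if not caffe_function.available:
    print('Install protobuf>=3.0.0 to load Caffe models on Python 3.')
```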
[ { "content": "import collections\nimport pkg_resources\nimport sys\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import functions\nfrom chainer import link\nfrom chainer import links\n\n\ndef _protobuf3():\n ws = pkg_resources.WorkingSet()\n try:\n ws.require('protobuf>=3.0.0a')\n return True\n except pkg_resources.VersionConflict:\n return False\n\n\nif _protobuf3():\n from chainer.links.caffe import caffe_pb3 as caffe_pb\n available = True\nelif sys.version_info < (3, 0, 0):\n # caffe_pb2 does not support Py3\n from chainer.links.caffe import caffe_pb2 as caffe_pb\n available = True\nelse:\n available = False\n\nif available:\n _type_to_method = {}\n _oldname_to_method = {}\n\n def _layer(typ, oldname):\n def decorator(meth):\n global _type_to_method\n _type_to_method[typ] = meth\n if oldname is not None:\n typevalue = getattr(caffe_pb.V1LayerParameter, oldname)\n _oldname_to_method[typevalue] = meth\n return meth\n return decorator\nelse:\n def _layer(typ, oldname): # fallback\n def decorator(meth):\n return meth\n return decorator\n\n\nclass CaffeFunction(link.Chain):\n\n \"\"\"Caffe emulator based on the model file of Caffe.\n\n Given a protocol buffers file of a Caffe model, this class loads and\n emulates it on :class:`~chainer.Variable` objects. It supports the official\n reference models provided by BVLC.\n\n .. note::\n\n This class only supports Python 2.7, since the compiled module for\n protocol buffers only supports Python 2. The ``__init__`` function\n raises an exception in Python 3.\n\n .. note::\n\n CaffeFunction ignores the following layers:\n\n - Layers that CaffeFunction does not support (including data layers)\n - Layers that have no top blobs\n - Layers whose bottom blobs are incomplete (i.e., some or all of them\n are not given nor computed)\n\n .. warning::\n\n It does not support full compatibility against Caffe. Some layers and\n configurations are not implemented in Chainer yet, though the reference\n models provided by the BVLC team are supported except data layers.\n\n .. admonition:: Example\n\n Consider we want to extract the (unnormalized) log class probability\n of given images using BVLC reference CaffeNet. The model can be\n downloaded from:\n\n http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel\n\n We want to compute the ``fc8`` blob from the ``data`` blob. It is simply\n written as follows::\n\n # Load the model\n func = CaffeFunction('path/to/bvlc_reference_caffenet.caffemodel')\n\n # Minibatch of size 10\n x_data = numpy.ndarray((10, 3, 227, 227), dtype=numpy.float32)\n ... # (Fill the minibatch here)\n\n # Forward the pre-trained net\n x = Variable(x_data)\n y, = func(inputs={'data': x}, outputs=['fc8'])\n\n The result ``y`` contains the Variable corresponding to the ``fc8``\n blob. The computational graph is memorized as a usual forward\n computation in Chainer, so we can run backprop through this pre-trained\n net.\n\n Args:\n model_path (str): Path to the binary-proto model file of Caffe.\n\n Attributes:\n fs (FunctionSet): A set of functions corresponding to parameterized\n layers of Caffe. 
The names of its attributes are same as the layer\n names of the given network.\n forwards (dict): A mapping from layer names to corresponding functions.\n\n \"\"\"\n def __init__(self, model_path):\n if not available:\n msg = 'CaffeFunction is only supported on protobuf>=3 in Python3'\n raise RuntimeError(msg)\n\n super(CaffeFunction, self).__init__()\n\n net = caffe_pb.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n self.forwards = {}\n self.split_map = {}\n self.layers = []\n\n if net.layer:\n for layer in net.layer:\n meth = _type_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not'\n 'support %s layer' % (layer.name, layer.type))\n else: # v1 format\n for layer in net.layers:\n meth = _oldname_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not'\n 'support it' % layer.name)\n\n def __call__(self, inputs, outputs, disable=(), train=True):\n \"\"\"Executes a sub-network of the network.\n\n This function acts as an interpreter of the network definition for\n Caffe. On execution, it interprets each layer one by one, and if the\n bottom blobs are already computed, then emulates the layer and stores\n output blobs as :class:`~chainer.Variable` objects.\n\n Args:\n inputs (dict): A dictionary whose key-value pairs indicate initial\n correspondences between blob names and\n :class:`~chainer.Variable` objects.\n outputs (Iterable): A list of blob names whose corresponding\n :class:`~chainer.Variable` objects are returned.\n disable (Iterable): A list of layer names that will be ignored\n during the forward computation.\n train (bool): If ``True``, this function emulates the TRAIN phase\n of the Caffe layers. Otherwise, it emulates the TEST phase.\n\n Returns:\n tuple: A tuple of output :class:`~chainer.Variable` objects\n corresponding to elements of the `outputs` argument.\n\n \"\"\"\n self.train = train\n variables = dict(inputs)\n for func_name, bottom, top in self.layers:\n if (func_name in disable or\n func_name not in self.forwards or\n any(blob not in variables for blob in bottom)):\n continue\n\n func = self.forwards[func_name]\n input_vars = tuple(variables[blob] for blob in bottom)\n output_vars = func(*input_vars)\n if not isinstance(output_vars, collections.Iterable):\n output_vars = output_vars,\n for var, name in zip(output_vars, top):\n variables[name] = var\n\n self.variables = variables\n return tuple(variables[blob] for blob in outputs)\n\n def _add_layer(self, layer):\n bottom = []\n for blob_name in layer.bottom:\n bottom.append(self.split_map.get(blob_name, blob_name))\n self.layers.append((layer.name, bottom, list(layer.top)))\n\n @_layer('Concat', 'CONCAT')\n def _setup_concat(self, layer):\n param = layer.concat_param\n axis = param.axis\n if axis == 1 and param.concat_dim != 1:\n axis = param.concat_dim\n\n self.forwards[layer.name] = _ListArgumentFcuntion(\n functions.concat, axis=axis)\n self._add_layer(layer)\n\n @_layer('Convolution', 'CONVOLUTION')\n def _setup_convolution(self, layer):\n blobs = layer.blobs\n param = layer.convolution_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n num = _get_num(blobs[0])\n channels = _get_channels(blobs[0])\n\n n_in = channels * param.group\n n_out = num\n func = links.Convolution2D(n_in, n_out, ksize, stride, pad,\n nobias=not param.bias_term)\n func.W.data[...] 
= 0\n\n part_size = len(blobs[0].data) // param.group\n for i in six.moves.range(param.group):\n in_slice = slice(i * n_in // param.group,\n (i+1) * n_in // param.group)\n out_slice = slice(i * n_out // param.group,\n (i+1) * n_out // param.group)\n w = func.W.data[out_slice, in_slice]\n\n data = numpy.array(blobs[0].data[i*part_size:(i+1)*part_size])\n w[:] = data.reshape(w.shape)\n\n if param.bias_term:\n func.b.data[:] = blobs[1].data\n\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Data', 'DATA')\n def _setup_data(self, layer):\n # We silently skip the data layer.\n pass\n\n @_layer('Dropout', 'DROPOUT')\n def _setup_dropout(self, layer):\n param = layer.dropout_param\n\n self.forwards[layer.name] = _DropoutFunction(\n self, ratio=param.dropout_ratio)\n self._add_layer(layer)\n\n @_layer('InnerProduct', 'INNER_PRODUCT')\n def _setup_inner_product(self, layer):\n param = layer.inner_product_param\n bias_term = param.bias_term\n if param.axis != 1:\n raise RuntimeError(\n 'Non-default axis in InnerProduct is not supported')\n\n blobs = layer.blobs\n width, height = _get_width(blobs[0]), _get_height(blobs[0])\n func = links.Linear(width, height, nobias=not bias_term)\n func.W.data.ravel()[:] = blobs[0].data\n if bias_term:\n func.b.data[:] = blobs[1].data\n\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('LRN', 'LRN')\n def _setup_lrn(self, layer):\n param = layer.lrn_param\n if param.norm_region != param.ACROSS_CHANNELS:\n raise RuntimeError('Within-channel LRN is not supported')\n\n fwd = _SingleArgumentFunction(\n functions.local_response_normalization,\n n=param.local_size, k=param.k,\n alpha=param.alpha / param.local_size, beta=param.beta)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Pooling', 'POOLING')\n def _setup_pooling(self, layer):\n param = layer.pooling_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n\n if param.pool == param.MAX:\n func = functions.max_pooling_2d\n elif param.pool == param.AVE:\n func = functions.average_pooling_2d\n else:\n raise RuntimeError('Stochastic pooling is not supported')\n\n fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('ReLU', 'RELU')\n def _setup_relu(self, layer):\n slope = layer.relu_param.negative_slope\n\n if slope != 0:\n fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope)\n else:\n fw = functions.relu\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('BatchNorm', None)\n def _setup_batchnorm(self, layer):\n # Get layer parameters.\n blobs = layer.blobs\n param = layer.batch_norm_param\n use_global_stats = param.use_global_stats\n decay = param.moving_average_fraction\n eps = param.eps\n size = int(blobs[0].shape.dim[0]) # Get channel dim from mean blob.\n\n # Make BatchNormalization link.\n func = links.BatchNormalization(size, decay=decay, eps=eps,\n use_gamma=False, use_beta=False)\n func.avg_mean.ravel()[:] = blobs[0].data\n func.avg_var.ravel()[:] = blobs[1].data\n self.add_link(layer.name, func)\n\n # Add layer.\n fwd = _SingleArgumentFunction(\n _CallChildLink(self, layer.name),\n test=use_global_stats, finetune=False)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Eltwise', 'ELTWISE')\n def _setup_eltwise(self, layer):\n # stable_prod_grad 
parameter is not supported now.\n operation = layer.eltwise_param.operation\n coeffs = layer.eltwise_param.coeff or None\n self.forwards[layer.name] = _EltwiseFunction(operation, coeffs)\n self._add_layer(layer)\n\n @_layer('Scale', None)\n def _setup_scale(self, layer):\n # Following parameters are not supported now:\n # - negative axis\n # - num_axes\n # - filler\n # - bias_filler\n\n # Get layer parameters.\n bottom = layer.bottom\n blobs = layer.blobs\n axis = layer.scale_param.axis\n bias_term = layer.scale_param.bias_term\n\n # Case of only one bottom where W is learnt parameter.\n if len(bottom) == 1:\n W_shape = blobs[0].shape.dim\n func = _Scale(axis, W_shape, bias_term)\n func.W.data.ravel()[:] = blobs[0].data\n if bias_term:\n func.bias.b.data.ravel()[:] = blobs[1].data\n # Case of two bottoms where W is given as a bottom.\n else:\n shape = blobs[0].shape.dim if bias_term else None\n func = _Scale(axis, bias_term=bias_term, bias_shape=shape)\n if bias_term:\n func.bias.b.data.ravel()[:] = blobs[0].data\n\n # Add layer.\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Softmax', 'SOFTMAX')\n def _setup_softmax(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n if layer.softmax_param.engine == 0: # DEFAULT\n fw = functions.softmax\n elif layer.softmax_param.engine == 1: # CAFFE\n fw = _SingleArgumentFunction(functions.softmax, use_cudnn=False)\n elif layer.softmax_param.engine == 2: # CUDNN\n fw = _SingleArgumentFunction(functions.softmax, use_cudnn=True)\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('SoftmaxWithLoss', 'SOFTMAX_LOSS')\n def _setup_softmax_with_loss(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n self.forwards[layer.name] = functions.softmax_cross_entropy\n self._add_layer(layer)\n\n @_layer('Split', 'SPLIT')\n def _setup_split(self, layer):\n for top in layer.top:\n self.split_map[top] = layer.bottom[0]\n\n\n# Internal functions\n\ndef _get_ksize(param):\n if param.kernel_h > 0:\n return param.kernel_h, param.kernel_w\n elif type(param.kernel_size) == int:\n return param.kernel_size\n elif len(param.kernel_size) == 1:\n return param.kernel_size[0]\n else:\n return param.kernel_size\n\n\ndef _get_stride(param):\n if param.stride_h > 0:\n return param.stride_h, param.stride_w\n elif type(param.stride) == int:\n return param.stride\n elif len(param.stride) == 0:\n return 1\n elif len(param.stride) == 1:\n return param.stride[0]\n else:\n return param.stride\n\n\ndef _get_pad(param):\n if param.pad_h > 0:\n return param.pad_h, param.pad_w\n elif type(param.pad) == int:\n return param.pad\n elif len(param.pad) == 0:\n return 0\n elif len(param.pad) == 1:\n return param.pad[0]\n else:\n return param.pad\n\n\ndef _get_num(blob):\n if blob.num > 0:\n return blob.num\n else:\n return blob.shape.dim[0]\n\n\ndef _get_channels(blob):\n if blob.channels > 0:\n return blob.channels\n else:\n return blob.shape.dim[1]\n\n\ndef _get_height(blob):\n if blob.height > 0:\n return blob.height\n elif len(blob.shape.dim) == 2:\n return blob.shape.dim[0]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[2]\n else:\n raise RuntimeError(\n '{}-dimentional array is not supported'.format(\n len(blob.shape.dim)))\n\n\ndef _get_width(blob):\n if blob.width > 0:\n return blob.width\n elif len(blob.shape.dim) == 2:\n return 
blob.shape.dim[1]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[3]\n else:\n raise RuntimeError(\n '{}-dimentional array is not supported'.format(\n len(blob.shape.dim)))\n\n\n# Internal class\n\nclass _SingleArgumentFunction(object):\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, x):\n return self.func(x, *self.args, **self.kwargs)\n\n\nclass _ListArgumentFcuntion(object):\n def __init__(self, func, **kwargs):\n self.func = func\n self.kwargs = kwargs\n\n def __call__(self, *xs):\n return self.func(xs, **self.kwargs)\n\n\nclass _DropoutFunction(object):\n def __init__(self, caffe_func, ratio):\n # `caffe_func.train` is determined when calling `__call__`\n self.caffe_func = caffe_func\n self.ratio = ratio\n\n def __call__(self, x):\n return functions.dropout(\n x, ratio=self.ratio, train=self.caffe_func.train)\n\n\nclass _CallChildLink(object):\n def __init__(self, caffe_func, name):\n self.name = name\n self.caffe_func = caffe_func\n\n def __call__(self, *xs, **kwargs):\n return self.caffe_func[self.name](*xs, **kwargs)\n\n\nclass _EltwiseFunction(object):\n def __init__(self, operation, coeffs=None):\n if coeffs is not None:\n assert len(coeffs) > 0\n self.operation = operation\n self.coeffs = coeffs\n\n def __call__(self, *xs):\n operation = self.operation\n\n if operation == 0: # PROD\n return six.moves.reduce(lambda x, y: x * y, xs),\n\n elif operation == 1: # SUM\n coeffs = self.coeffs\n if coeffs is not None:\n assert len(xs) == len(coeffs)\n xs = [x * coeff for x, coeff in zip(xs, coeffs)]\n return six.moves.reduce(lambda x, y: x + y, xs),\n\n elif operation == 2: # MAX\n return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),\n\n else:\n raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')\n\n\ndef _scale(x, y, axis=1):\n x_shape = x.data.shape\n y_shape = y.data.shape\n assert x_shape[axis:axis + len(y_shape)] == y_shape\n y1_shape = tuple([1] * axis + list(y_shape) +\n [1] * (len(x_shape) - axis - len(y_shape)))\n y1 = functions.reshape(y, y1_shape)\n y2 = functions.broadcast_to(y1, x_shape)\n return x * y2\n\n\nclass _Scale(link.Chain):\n def __init__(self, axis=1, W_shape=None, bias_term=False, bias_shape=None):\n super(_Scale, self).__init__()\n\n # Add W parameter if given.\n if W_shape is not None:\n self.add_param('W', W_shape)\n self.W.data.fill(1)\n else:\n self.W = None\n\n # Add bias term if given.\n if W_shape is not None:\n if bias_term:\n func = _Bias(axis, W_shape)\n self.add_link('bias', func)\n else:\n self.bias = None\n else:\n if bias_term:\n if bias_shape is None:\n raise ValueError('bias_shape should be given if W is not '\n 'learnt parameter and bias_term is True.')\n func = _Bias(axis, bias_shape)\n self.add_link('bias', func)\n else:\n self.bias = None\n\n # Hold axis.\n self.axis = axis\n\n def __call__(self, *xs):\n axis = self.axis\n\n # Case of only one bottom where W is learnt parameter.\n if self.W is not None:\n assert len(xs) == 1\n x, = xs\n W = self.W\n z = _scale(x, W, axis)\n # Case of two bottoms where W is given as a bottom.\n else:\n assert len(xs) == 2\n x, y = xs\n z = _scale(x, y, axis)\n\n # Forward propagate bias term if given.\n if self.bias is not None:\n return self.bias(z)\n else:\n return z\n\n\ndef _bias(x, y, axis=1):\n x_shape = x.data.shape\n y_shape = y.data.shape\n assert x_shape[axis:axis + len(y_shape)] == y_shape\n y1_shape = tuple([1] * axis + list(y_shape) +\n [1] * (len(x_shape) - axis - len(y_shape)))\n y1 = 
functions.reshape(y, y1_shape)\n y2 = functions.broadcast_to(y1, x_shape)\n return x + y2\n\n\nclass _Bias(link.Link):\n def __init__(self, axis=1, shape=None):\n super(_Bias, self).__init__()\n\n # Add b parameter if given.\n if shape is not None:\n self.add_param('b', shape)\n self.b.data.fill(0)\n else:\n self.b = None\n\n # Hold axis.\n self.axis = axis\n\n def __call__(self, *xs):\n axis = self.axis\n\n # Case of only one bottom where b is learnt parameter.\n if self.b is not None:\n assert len(xs) == 1\n x, = xs\n b = self.b\n return _bias(x, b, axis)\n # Case of two bottoms where b is given as a bottom.\n else:\n assert len(xs) == 2\n x, y = xs\n return _bias(x, y, axis)\n", "path": "chainer/links/caffe/caffe_function.py" } ]
[ { "content": "import collections\nimport pkg_resources\nimport sys\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import functions\nfrom chainer import link\nfrom chainer import links\n\n\ndef _protobuf3():\n ws = pkg_resources.WorkingSet()\n try:\n ws.require('protobuf>=3.0.0a')\n return True\n except pkg_resources.VersionConflict:\n return False\n\n\nif _protobuf3():\n from chainer.links.caffe import caffe_pb3 as caffe_pb\n available = True\nelif sys.version_info < (3, 0, 0):\n # caffe_pb2 does not support Py3\n from chainer.links.caffe import caffe_pb2 as caffe_pb\n available = True\nelse:\n available = False\n\nif available:\n _type_to_method = {}\n _oldname_to_method = {}\n\n def _layer(typ, oldname):\n def decorator(meth):\n global _type_to_method\n _type_to_method[typ] = meth\n if oldname is not None:\n typevalue = getattr(caffe_pb.V1LayerParameter, oldname)\n _oldname_to_method[typevalue] = meth\n return meth\n return decorator\nelse:\n def _layer(typ, oldname): # fallback\n def decorator(meth):\n return meth\n return decorator\n\n\nclass CaffeFunction(link.Chain):\n\n \"\"\"Caffe emulator based on the model file of Caffe.\n\n Given a protocol buffers file of a Caffe model, this class loads and\n emulates it on :class:`~chainer.Variable` objects. It supports the official\n reference models provided by BVLC.\n\n .. note::\n\n protobuf>=3.0.0 is required if you use Python 3 because protobuf 2 is\n not supported on Python 3.\n\n .. note::\n\n CaffeFunction ignores the following layers:\n\n - Layers that CaffeFunction does not support (including data layers)\n - Layers that have no top blobs\n - Layers whose bottom blobs are incomplete (i.e., some or all of them\n are not given nor computed)\n\n .. warning::\n\n It does not support full compatibility against Caffe. Some layers and\n configurations are not implemented in Chainer yet, though the reference\n models provided by the BVLC team are supported except data layers.\n\n .. admonition:: Example\n\n Consider we want to extract the (unnormalized) log class probability\n of given images using BVLC reference CaffeNet. The model can be\n downloaded from:\n\n http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel\n\n We want to compute the ``fc8`` blob from the ``data`` blob. It is simply\n written as follows::\n\n # Load the model\n func = CaffeFunction('path/to/bvlc_reference_caffenet.caffemodel')\n\n # Minibatch of size 10\n x_data = numpy.ndarray((10, 3, 227, 227), dtype=numpy.float32)\n ... # (Fill the minibatch here)\n\n # Forward the pre-trained net\n x = Variable(x_data)\n y, = func(inputs={'data': x}, outputs=['fc8'])\n\n The result ``y`` contains the Variable corresponding to the ``fc8``\n blob. The computational graph is memorized as a usual forward\n computation in Chainer, so we can run backprop through this pre-trained\n net.\n\n Args:\n model_path (str): Path to the binary-proto model file of Caffe.\n\n Attributes:\n fs (FunctionSet): A set of functions corresponding to parameterized\n layers of Caffe. 
The names of its attributes are same as the layer\n names of the given network.\n forwards (dict): A mapping from layer names to corresponding functions.\n\n \"\"\"\n def __init__(self, model_path):\n if not available:\n msg = 'CaffeFunction is only supported on protobuf>=3 in Python3'\n raise RuntimeError(msg)\n\n super(CaffeFunction, self).__init__()\n\n net = caffe_pb.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n self.forwards = {}\n self.split_map = {}\n self.layers = []\n\n if net.layer:\n for layer in net.layer:\n meth = _type_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not'\n 'support %s layer' % (layer.name, layer.type))\n else: # v1 format\n for layer in net.layers:\n meth = _oldname_to_method.get(layer.type)\n if meth:\n meth(self, layer)\n else:\n warnings.warn(\n 'Skip the layer \"%s\", since CaffeFunction does not'\n 'support it' % layer.name)\n\n def __call__(self, inputs, outputs, disable=(), train=True):\n \"\"\"Executes a sub-network of the network.\n\n This function acts as an interpreter of the network definition for\n Caffe. On execution, it interprets each layer one by one, and if the\n bottom blobs are already computed, then emulates the layer and stores\n output blobs as :class:`~chainer.Variable` objects.\n\n Args:\n inputs (dict): A dictionary whose key-value pairs indicate initial\n correspondences between blob names and\n :class:`~chainer.Variable` objects.\n outputs (Iterable): A list of blob names whose corresponding\n :class:`~chainer.Variable` objects are returned.\n disable (Iterable): A list of layer names that will be ignored\n during the forward computation.\n train (bool): If ``True``, this function emulates the TRAIN phase\n of the Caffe layers. Otherwise, it emulates the TEST phase.\n\n Returns:\n tuple: A tuple of output :class:`~chainer.Variable` objects\n corresponding to elements of the `outputs` argument.\n\n \"\"\"\n self.train = train\n variables = dict(inputs)\n for func_name, bottom, top in self.layers:\n if (func_name in disable or\n func_name not in self.forwards or\n any(blob not in variables for blob in bottom)):\n continue\n\n func = self.forwards[func_name]\n input_vars = tuple(variables[blob] for blob in bottom)\n output_vars = func(*input_vars)\n if not isinstance(output_vars, collections.Iterable):\n output_vars = output_vars,\n for var, name in zip(output_vars, top):\n variables[name] = var\n\n self.variables = variables\n return tuple(variables[blob] for blob in outputs)\n\n def _add_layer(self, layer):\n bottom = []\n for blob_name in layer.bottom:\n bottom.append(self.split_map.get(blob_name, blob_name))\n self.layers.append((layer.name, bottom, list(layer.top)))\n\n @_layer('Concat', 'CONCAT')\n def _setup_concat(self, layer):\n param = layer.concat_param\n axis = param.axis\n if axis == 1 and param.concat_dim != 1:\n axis = param.concat_dim\n\n self.forwards[layer.name] = _ListArgumentFcuntion(\n functions.concat, axis=axis)\n self._add_layer(layer)\n\n @_layer('Convolution', 'CONVOLUTION')\n def _setup_convolution(self, layer):\n blobs = layer.blobs\n param = layer.convolution_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n num = _get_num(blobs[0])\n channels = _get_channels(blobs[0])\n\n n_in = channels * param.group\n n_out = num\n func = links.Convolution2D(n_in, n_out, ksize, stride, pad,\n nobias=not param.bias_term)\n func.W.data[...] 
= 0\n\n part_size = len(blobs[0].data) // param.group\n for i in six.moves.range(param.group):\n in_slice = slice(i * n_in // param.group,\n (i+1) * n_in // param.group)\n out_slice = slice(i * n_out // param.group,\n (i+1) * n_out // param.group)\n w = func.W.data[out_slice, in_slice]\n\n data = numpy.array(blobs[0].data[i*part_size:(i+1)*part_size])\n w[:] = data.reshape(w.shape)\n\n if param.bias_term:\n func.b.data[:] = blobs[1].data\n\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Data', 'DATA')\n def _setup_data(self, layer):\n # We silently skip the data layer.\n pass\n\n @_layer('Dropout', 'DROPOUT')\n def _setup_dropout(self, layer):\n param = layer.dropout_param\n\n self.forwards[layer.name] = _DropoutFunction(\n self, ratio=param.dropout_ratio)\n self._add_layer(layer)\n\n @_layer('InnerProduct', 'INNER_PRODUCT')\n def _setup_inner_product(self, layer):\n param = layer.inner_product_param\n bias_term = param.bias_term\n if param.axis != 1:\n raise RuntimeError(\n 'Non-default axis in InnerProduct is not supported')\n\n blobs = layer.blobs\n width, height = _get_width(blobs[0]), _get_height(blobs[0])\n func = links.Linear(width, height, nobias=not bias_term)\n func.W.data.ravel()[:] = blobs[0].data\n if bias_term:\n func.b.data[:] = blobs[1].data\n\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('LRN', 'LRN')\n def _setup_lrn(self, layer):\n param = layer.lrn_param\n if param.norm_region != param.ACROSS_CHANNELS:\n raise RuntimeError('Within-channel LRN is not supported')\n\n fwd = _SingleArgumentFunction(\n functions.local_response_normalization,\n n=param.local_size, k=param.k,\n alpha=param.alpha / param.local_size, beta=param.beta)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Pooling', 'POOLING')\n def _setup_pooling(self, layer):\n param = layer.pooling_param\n ksize = _get_ksize(param)\n stride = _get_stride(param)\n pad = _get_pad(param)\n\n if param.pool == param.MAX:\n func = functions.max_pooling_2d\n elif param.pool == param.AVE:\n func = functions.average_pooling_2d\n else:\n raise RuntimeError('Stochastic pooling is not supported')\n\n fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('ReLU', 'RELU')\n def _setup_relu(self, layer):\n slope = layer.relu_param.negative_slope\n\n if slope != 0:\n fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope)\n else:\n fw = functions.relu\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('BatchNorm', None)\n def _setup_batchnorm(self, layer):\n # Get layer parameters.\n blobs = layer.blobs\n param = layer.batch_norm_param\n use_global_stats = param.use_global_stats\n decay = param.moving_average_fraction\n eps = param.eps\n size = int(blobs[0].shape.dim[0]) # Get channel dim from mean blob.\n\n # Make BatchNormalization link.\n func = links.BatchNormalization(size, decay=decay, eps=eps,\n use_gamma=False, use_beta=False)\n func.avg_mean.ravel()[:] = blobs[0].data\n func.avg_var.ravel()[:] = blobs[1].data\n self.add_link(layer.name, func)\n\n # Add layer.\n fwd = _SingleArgumentFunction(\n _CallChildLink(self, layer.name),\n test=use_global_stats, finetune=False)\n self.forwards[layer.name] = fwd\n self._add_layer(layer)\n\n @_layer('Eltwise', 'ELTWISE')\n def _setup_eltwise(self, layer):\n # stable_prod_grad 
parameter is not supported now.\n operation = layer.eltwise_param.operation\n coeffs = layer.eltwise_param.coeff or None\n self.forwards[layer.name] = _EltwiseFunction(operation, coeffs)\n self._add_layer(layer)\n\n @_layer('Scale', None)\n def _setup_scale(self, layer):\n # Following parameters are not supported now:\n # - negative axis\n # - num_axes\n # - filler\n # - bias_filler\n\n # Get layer parameters.\n bottom = layer.bottom\n blobs = layer.blobs\n axis = layer.scale_param.axis\n bias_term = layer.scale_param.bias_term\n\n # Case of only one bottom where W is learnt parameter.\n if len(bottom) == 1:\n W_shape = blobs[0].shape.dim\n func = _Scale(axis, W_shape, bias_term)\n func.W.data.ravel()[:] = blobs[0].data\n if bias_term:\n func.bias.b.data.ravel()[:] = blobs[1].data\n # Case of two bottoms where W is given as a bottom.\n else:\n shape = blobs[0].shape.dim if bias_term else None\n func = _Scale(axis, bias_term=bias_term, bias_shape=shape)\n if bias_term:\n func.bias.b.data.ravel()[:] = blobs[0].data\n\n # Add layer.\n self.add_link(layer.name, func)\n self.forwards[layer.name] = _CallChildLink(self, layer.name)\n self._add_layer(layer)\n\n @_layer('Softmax', 'SOFTMAX')\n def _setup_softmax(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n if layer.softmax_param.engine == 0: # DEFAULT\n fw = functions.softmax\n elif layer.softmax_param.engine == 1: # CAFFE\n fw = _SingleArgumentFunction(functions.softmax, use_cudnn=False)\n elif layer.softmax_param.engine == 2: # CUDNN\n fw = _SingleArgumentFunction(functions.softmax, use_cudnn=True)\n\n self.forwards[layer.name] = fw\n self._add_layer(layer)\n\n @_layer('SoftmaxWithLoss', 'SOFTMAX_LOSS')\n def _setup_softmax_with_loss(self, layer):\n if layer.softmax_param.axis != 1:\n raise RuntimeError(\n 'Softmax along non-channel axis is not supported')\n\n self.forwards[layer.name] = functions.softmax_cross_entropy\n self._add_layer(layer)\n\n @_layer('Split', 'SPLIT')\n def _setup_split(self, layer):\n for top in layer.top:\n self.split_map[top] = layer.bottom[0]\n\n\n# Internal functions\n\ndef _get_ksize(param):\n if param.kernel_h > 0:\n return param.kernel_h, param.kernel_w\n elif type(param.kernel_size) == int:\n return param.kernel_size\n elif len(param.kernel_size) == 1:\n return param.kernel_size[0]\n else:\n return param.kernel_size\n\n\ndef _get_stride(param):\n if param.stride_h > 0:\n return param.stride_h, param.stride_w\n elif type(param.stride) == int:\n return param.stride\n elif len(param.stride) == 0:\n return 1\n elif len(param.stride) == 1:\n return param.stride[0]\n else:\n return param.stride\n\n\ndef _get_pad(param):\n if param.pad_h > 0:\n return param.pad_h, param.pad_w\n elif type(param.pad) == int:\n return param.pad\n elif len(param.pad) == 0:\n return 0\n elif len(param.pad) == 1:\n return param.pad[0]\n else:\n return param.pad\n\n\ndef _get_num(blob):\n if blob.num > 0:\n return blob.num\n else:\n return blob.shape.dim[0]\n\n\ndef _get_channels(blob):\n if blob.channels > 0:\n return blob.channels\n else:\n return blob.shape.dim[1]\n\n\ndef _get_height(blob):\n if blob.height > 0:\n return blob.height\n elif len(blob.shape.dim) == 2:\n return blob.shape.dim[0]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[2]\n else:\n raise RuntimeError(\n '{}-dimentional array is not supported'.format(\n len(blob.shape.dim)))\n\n\ndef _get_width(blob):\n if blob.width > 0:\n return blob.width\n elif len(blob.shape.dim) == 2:\n return 
blob.shape.dim[1]\n elif len(blob.shape.dim) == 4:\n return blob.shape.dim[3]\n else:\n raise RuntimeError(\n '{}-dimentional array is not supported'.format(\n len(blob.shape.dim)))\n\n\n# Internal class\n\nclass _SingleArgumentFunction(object):\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, x):\n return self.func(x, *self.args, **self.kwargs)\n\n\nclass _ListArgumentFcuntion(object):\n def __init__(self, func, **kwargs):\n self.func = func\n self.kwargs = kwargs\n\n def __call__(self, *xs):\n return self.func(xs, **self.kwargs)\n\n\nclass _DropoutFunction(object):\n def __init__(self, caffe_func, ratio):\n # `caffe_func.train` is determined when calling `__call__`\n self.caffe_func = caffe_func\n self.ratio = ratio\n\n def __call__(self, x):\n return functions.dropout(\n x, ratio=self.ratio, train=self.caffe_func.train)\n\n\nclass _CallChildLink(object):\n def __init__(self, caffe_func, name):\n self.name = name\n self.caffe_func = caffe_func\n\n def __call__(self, *xs, **kwargs):\n return self.caffe_func[self.name](*xs, **kwargs)\n\n\nclass _EltwiseFunction(object):\n def __init__(self, operation, coeffs=None):\n if coeffs is not None:\n assert len(coeffs) > 0\n self.operation = operation\n self.coeffs = coeffs\n\n def __call__(self, *xs):\n operation = self.operation\n\n if operation == 0: # PROD\n return six.moves.reduce(lambda x, y: x * y, xs),\n\n elif operation == 1: # SUM\n coeffs = self.coeffs\n if coeffs is not None:\n assert len(xs) == len(coeffs)\n xs = [x * coeff for x, coeff in zip(xs, coeffs)]\n return six.moves.reduce(lambda x, y: x + y, xs),\n\n elif operation == 2: # MAX\n return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),\n\n else:\n raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')\n\n\ndef _scale(x, y, axis=1):\n x_shape = x.data.shape\n y_shape = y.data.shape\n assert x_shape[axis:axis + len(y_shape)] == y_shape\n y1_shape = tuple([1] * axis + list(y_shape) +\n [1] * (len(x_shape) - axis - len(y_shape)))\n y1 = functions.reshape(y, y1_shape)\n y2 = functions.broadcast_to(y1, x_shape)\n return x * y2\n\n\nclass _Scale(link.Chain):\n def __init__(self, axis=1, W_shape=None, bias_term=False, bias_shape=None):\n super(_Scale, self).__init__()\n\n # Add W parameter if given.\n if W_shape is not None:\n self.add_param('W', W_shape)\n self.W.data.fill(1)\n else:\n self.W = None\n\n # Add bias term if given.\n if W_shape is not None:\n if bias_term:\n func = _Bias(axis, W_shape)\n self.add_link('bias', func)\n else:\n self.bias = None\n else:\n if bias_term:\n if bias_shape is None:\n raise ValueError('bias_shape should be given if W is not '\n 'learnt parameter and bias_term is True.')\n func = _Bias(axis, bias_shape)\n self.add_link('bias', func)\n else:\n self.bias = None\n\n # Hold axis.\n self.axis = axis\n\n def __call__(self, *xs):\n axis = self.axis\n\n # Case of only one bottom where W is learnt parameter.\n if self.W is not None:\n assert len(xs) == 1\n x, = xs\n W = self.W\n z = _scale(x, W, axis)\n # Case of two bottoms where W is given as a bottom.\n else:\n assert len(xs) == 2\n x, y = xs\n z = _scale(x, y, axis)\n\n # Forward propagate bias term if given.\n if self.bias is not None:\n return self.bias(z)\n else:\n return z\n\n\ndef _bias(x, y, axis=1):\n x_shape = x.data.shape\n y_shape = y.data.shape\n assert x_shape[axis:axis + len(y_shape)] == y_shape\n y1_shape = tuple([1] * axis + list(y_shape) +\n [1] * (len(x_shape) - axis - len(y_shape)))\n y1 = 
functions.reshape(y, y1_shape)\n y2 = functions.broadcast_to(y1, x_shape)\n return x + y2\n\n\nclass _Bias(link.Link):\n def __init__(self, axis=1, shape=None):\n super(_Bias, self).__init__()\n\n # Add b parameter if given.\n if shape is not None:\n self.add_param('b', shape)\n self.b.data.fill(0)\n else:\n self.b = None\n\n # Hold axis.\n self.axis = axis\n\n def __call__(self, *xs):\n axis = self.axis\n\n # Case of only one bottom where b is learnt parameter.\n if self.b is not None:\n assert len(xs) == 1\n x, = xs\n b = self.b\n return _bias(x, b, axis)\n # Case of two bottoms where b is given as a bottom.\n else:\n assert len(xs) == 2\n x, y = xs\n return _bias(x, y, axis)\n", "path": "chainer/links/caffe/caffe_function.py" } ]
diff --git a/chainer/links/caffe/caffe_function.py b/chainer/links/caffe/caffe_function.py index 69628f28f61d..7a1fdef31226 100644 --- a/chainer/links/caffe/caffe_function.py +++ b/chainer/links/caffe/caffe_function.py @@ -60,9 +60,8 @@ class CaffeFunction(link.Chain): .. note:: - This class only supports Python 2.7, since the compiled module for - protocol buffers only supports Python 2. The ``__init__`` function - raises an exception in Python 3. + protobuf>=3.0.0 is required if you use Python 3 because protobuf 2 is + not supported on Python 3. .. note::
modin-project__modin-1782
ClusterError class should implement its own version of __str__ method

### Describe the problem

`ClusterError` includes a `cause` field that is not printed when the exception is rendered. This makes it difficult to understand the problems that cause such exceptions.
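For illustration only, here is a minimal sketch of the kind of `__str__` override the report asks for, written against the `ClusterError.__init__` shown in this record's files. It is an editor-added example, not the change recorded in this entry's diff:

```python
class ClusterError(Exception):
    """Generic cluster operating exception (illustrative copy for this sketch)."""

    def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):
        self.cause = cause
        self.traceback = traceback
        super().__init__(*args, **kw)

    def __str__(self):
        # Surface the wrapped cause alongside the regular exception message.
        base = super().__str__()
        return f"{base} (cause: {self.cause!r})" if self.cause is not None else base


err = ClusterError("spawn failed", cause=ValueError("bad key"))
print(err)  # -> spawn failed (cause: ValueError('bad key'))
```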
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom typing import NamedTuple\nimport os\nimport sys\n\n\nclass ClusterError(Exception):\n \"\"\"\n Generic cluster operating exception\n \"\"\"\n\n def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):\n self.cause = cause\n self.traceback = traceback\n super().__init__(*args, **kw)\n\n\nclass CannotSpawnCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be spawned in the cloud\n \"\"\"\n\n\nclass CannotDestroyCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be destroyed in the cloud\n \"\"\"\n\n\nclass ConnectionDetails(NamedTuple):\n user_name: str = \"modin\"\n key_file: str = None\n address: str = None\n port: int = 22\n\n\n_EXT = (\".exe\", \".com\", \".cmd\", \".bat\", \"\") if sys.platform == \"win32\" else (\"\",)\n\n\ndef _which(prog):\n for entry in os.environ[\"PATH\"].split(os.pathsep):\n for ext in _EXT:\n path = os.path.join(entry, prog + ext)\n if os.access(path, os.X_OK):\n return path\n return None\n\n\ndef _get_ssh_proxy_command():\n socks_proxy = os.environ.get(\"MODIN_SOCKS_PROXY\", None)\n if socks_proxy is None:\n return None\n if _which(\"nc\"):\n return f\"nc -x {socks_proxy} %h %p\"\n elif _which(\"connect\"):\n return f\"connect -S {socks_proxy} %h %p\"\n raise ClusterError(\n \"SSH through proxy required but no supported proxying tools found\"\n )\n", "path": "modin/experimental/cloud/base.py" } ]
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom typing import NamedTuple\nimport os\nimport sys\n\n\nclass ClusterError(Exception):\n \"\"\"\n Generic cluster operating exception\n \"\"\"\n\n def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):\n self.cause = cause\n self.traceback = traceback\n super().__init__(*args, **kw)\n\n def __str__(self):\n if self.clause:\n return f\"clause: {self.cause}\\n{super()}\"\n return str(super())\n\n\nclass CannotSpawnCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be spawned in the cloud\n \"\"\"\n\n\nclass CannotDestroyCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be destroyed in the cloud\n \"\"\"\n\n\nclass ConnectionDetails(NamedTuple):\n user_name: str = \"modin\"\n key_file: str = None\n address: str = None\n port: int = 22\n\n\n_EXT = (\".exe\", \".com\", \".cmd\", \".bat\", \"\") if sys.platform == \"win32\" else (\"\",)\n\n\ndef _which(prog):\n for entry in os.environ[\"PATH\"].split(os.pathsep):\n for ext in _EXT:\n path = os.path.join(entry, prog + ext)\n if os.access(path, os.X_OK):\n return path\n return None\n\n\ndef _get_ssh_proxy_command():\n socks_proxy = os.environ.get(\"MODIN_SOCKS_PROXY\", None)\n if socks_proxy is None:\n return None\n if _which(\"nc\"):\n return f\"nc -x {socks_proxy} %h %p\"\n elif _which(\"connect\"):\n return f\"connect -S {socks_proxy} %h %p\"\n raise ClusterError(\n \"SSH through proxy required but no supported proxying tools found\"\n )\n", "path": "modin/experimental/cloud/base.py" } ]
diff --git a/modin/experimental/cloud/base.py b/modin/experimental/cloud/base.py index e73d4b4bf9b..f78ddf30c7d 100644 --- a/modin/experimental/cloud/base.py +++ b/modin/experimental/cloud/base.py @@ -26,6 +26,11 @@ def __init__(self, *args, cause: BaseException = None, traceback: str = None, ** self.traceback = traceback super().__init__(*args, **kw) + def __str__(self): + if self.clause: + return f"clause: {self.cause}\n{super()}" + return str(super()) + class CannotSpawnCluster(ClusterError): """
rasterio__rasterio-1390
1.0 RC 1

Hey all, if there aren't any reports of show-stopping bugs in 1.0b4, I'd like to put out a release candidate on Wednesday 6/27.
[ { "content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport logging\nimport warnings\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n class Path:\n pass\n\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nfrom rasterio._base import gdal_version\nfrom rasterio.drivers import is_blacklisted\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_, check_dtype)\nfrom rasterio.env import ensure_env_credentialled, Env\nfrom rasterio.errors import RasterioIOError\nfrom rasterio.compat import string_types\nfrom rasterio.io import (\n DatasetReader, get_writer_for_path, get_writer_for_driver, MemoryFile)\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\nfrom rasterio.path import parse_path\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nimport rasterio._err\nimport rasterio.coords\nimport rasterio.enums\nimport rasterio.path\n\n\n__all__ = ['band', 'open', 'pad']\n__version__ = \"1.0b4\"\n__gdal_version__ = gdal_version()\n\n# Rasterio attaches NullHandler to the 'rasterio' logger and its\n# descendents. See\n# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library\n# Applications must attach their own handlers in order to see messages.\n# See rasterio/rio/main.py for an example.\nlog = logging.getLogger(__name__)\nlog.addHandler(NullHandler())\n\n\n@ensure_env_credentialled\ndef open(fp, mode='r', driver=None, width=None, height=None, count=None,\n crs=None, transform=None, dtype=None, nodata=None, sharing=True,\n **kwargs):\n \"\"\"Open a dataset for reading or writing.\n\n The dataset may be located in a local file, in a resource located by\n a URL, or contained within a stream of bytes.\n\n In read ('r') or read/write ('r+') mode, no keyword arguments are\n required: these attributes are supplied by the opened dataset.\n\n In write ('w' or 'w+') mode, the driver, width, height, count, and dtype\n keywords are strictly required.\n\n Parameters\n ----------\n fp : str, file object or pathlib.Path object\n A filename or URL, a file object opened in binary ('rb') mode,\n or a Path object.\n mode : str, optional\n 'r' (read, the default), 'r+' (read/write), 'w' (write), or\n 'w+' (write/read).\n driver : str, optional\n A short format driver name (e.g. \"GTiff\" or \"JPEG\") or a list of\n such names (see GDAL docs at\n http://www.gdal.org/formats_list.html). In 'w' or 'w+' modes\n a single name is required. In 'r' or 'r+' modes the driver can\n usually be omitted. Registered drivers will be tried\n sequentially until a match is found. When multiple drivers are\n available for a format such as JPEG2000, one of them can be\n selected by using this keyword argument.\n width, height : int, optional\n The numbers of rows and columns of the raster dataset. Required\n in 'w' or 'w+' modes, they are ignored in 'r' or 'r+' modes.\n count : int, optional\n The count of dataset bands. Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n dtype : str or numpy dtype\n The data type for bands. For example: 'uint8' or\n ``rasterio.uint16``. 
Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n crs : str, dict, or CRS; optional\n The coordinate reference system. Required in 'w' or 'w+' modes,\n it is ignored in 'r' or 'r+' modes.\n transform : Affine instance, optional\n Affine transformation mapping the pixel space to geographic\n space. Required in 'w' or 'w+' modes, it is ignored in 'r' or\n 'r+' modes.\n nodata : int, float, or nan; optional\n Defines the pixel value to be interpreted as not valid data.\n Required in 'w' or 'w+' modes, it is ignored in 'r' or 'r+'\n modes.\n sharing : bool\n A flag that allows sharing of dataset handles. Default is\n `True`. Should be set to `False` in a multithreaded:w program.\n kwargs : optional\n These are passed to format drivers as directives for creating or\n interpreting datasets. For example: in 'w' or 'w+' modes\n a `tiled=True` keyword argument will direct the GeoTIFF format\n driver to create a tiled, rather than striped, TIFF.\n\n Returns\n -------\n A ``DatasetReader`` or ``DatasetUpdater`` object.\n\n Examples\n --------\n\n To open a GeoTIFF for reading using standard driver discovery and\n no directives:\n\n >>> import rasterio\n >>> with rasterio.open('example.tif') as dataset:\n ... print(dataset.profile)\n\n To open a JPEG2000 using only the JP2OpenJPEG driver:\n\n >>> with rasterio.open(\n ... 'example.jp2', driver='JP2OpenJPEG') as dataset:\n ... print(dataset.profile)\n\n To create a new 8-band, 16-bit unsigned, tiled, and LZW-compressed\n GeoTIFF with a global extent and 0.5 degree resolution:\n\n >>> from rasterio.transform import from_origin\n >>> with rasterio.open(\n ... 'example.tif', 'w', driver='GTiff', dtype='uint16',\n ... width=720, height=360, count=8, crs='EPSG:4326',\n ... transform=from_origin(-180.0, 90.0, 0.5, 0.5),\n ... nodata=0, tiled=True, compress='lzw') as dataset:\n ... 
dataset.write(...)\n \"\"\"\n\n if not isinstance(fp, string_types):\n if not (hasattr(fp, 'read') or hasattr(fp, 'write') or isinstance(fp, Path)):\n raise TypeError(\"invalid path or file: {0!r}\".format(fp))\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: {0!r}\".format(mode))\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: {0!r}\".format(driver))\n if dtype and not check_dtype(dtype):\n raise TypeError(\"invalid dtype: {0!r}\".format(dtype))\n if nodata is not None:\n nodata = float(nodata)\n if transform:\n transform = guard_transform(transform)\n\n # Check driver/mode blacklist.\n if driver and is_blacklisted(driver, mode):\n raise RasterioIOError(\n \"Blacklisted: file cannot be opened by \"\n \"driver '{0}' in '{1}' mode\".format(driver, mode))\n\n # Special case for file object argument.\n if mode == 'r' and hasattr(fp, 'read'):\n\n @contextmanager\n def fp_reader(fp):\n memfile = MemoryFile(fp.read())\n dataset = memfile.open()\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.close()\n\n return fp_reader(fp)\n\n elif mode in ('w', 'w+') and hasattr(fp, 'write'):\n\n @contextmanager\n def fp_writer(fp):\n memfile = MemoryFile()\n dataset = memfile.open(driver=driver, width=width, height=height,\n count=count, crs=crs, transform=transform,\n dtype=dtype, nodata=nodata, **kwargs)\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.seek(0)\n fp.write(memfile.read())\n memfile.close()\n\n return fp_writer(fp)\n\n else:\n # If a pathlib.Path instance is given, convert it to a string path.\n if isinstance(fp, Path):\n fp = str(fp)\n\n # The 'normal' filename or URL path.\n path = parse_path(fp)\n\n # Create dataset instances and pass the given env, which will\n # be taken over by the dataset's context manager if it is not\n # None.\n if mode == 'r':\n s = DatasetReader(path, driver=driver, **kwargs)\n elif mode == 'r+':\n s = get_writer_for_path(path)(path, mode, driver=driver, **kwargs)\n elif mode.startswith(\"w\"):\n s = get_writer_for_driver(driver)(path, mode, driver=driver,\n width=width, height=height,\n count=count, crs=crs,\n transform=transform,\n dtype=dtype, nodata=nodata,\n **kwargs)\n else:\n raise ValueError(\n \"mode must be one of 'r', 'r+', or 'w', not %s\" % mode)\n return s\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\n\ndef band(ds, bidx):\n \"\"\"A dataset and one or more of its bands\n\n Parameters\n ----------\n ds: dataset object\n An opened rasterio dataset object.\n bidx: int or sequence of ints\n Band number(s), index starting at 1.\n\n Returns\n -------\n rasterio.Band\n \"\"\"\n return Band(ds, bidx, set(ds.dtypes).pop(), ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"pad array and adjust affine transform matrix.\n\n Parameters\n ----------\n array: ndarray\n Numpy ndarray, for best results a 2D array\n transform: Affine transform\n transform object mapping pixel space to coordinates\n pad_width: int\n number of pixels to pad array on all four\n mode: str or function\n define the method for determining padded values\n\n Returns\n -------\n (array, transform): tuple\n Tuple of new array and affine transform\n\n Notes\n -----\n See numpy docs for details on mode and other kwargs:\n http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html\n \"\"\"\n import numpy as np\n transform = guard_transform(transform)\n padded_array = np.pad(array, pad_width, mode, **kwargs)\n padded_trans = 
list(transform)\n padded_trans[2] -= pad_width * padded_trans[0]\n padded_trans[5] -= pad_width * padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n", "path": "rasterio/__init__.py" } ]
[ { "content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport logging\nimport warnings\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n class Path:\n pass\n\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nfrom rasterio._base import gdal_version\nfrom rasterio.drivers import is_blacklisted\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_, check_dtype)\nfrom rasterio.env import ensure_env_credentialled, Env\nfrom rasterio.errors import RasterioIOError\nfrom rasterio.compat import string_types\nfrom rasterio.io import (\n DatasetReader, get_writer_for_path, get_writer_for_driver, MemoryFile)\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\nfrom rasterio.path import parse_path\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nimport rasterio._err\nimport rasterio.coords\nimport rasterio.enums\nimport rasterio.path\n\n\n__all__ = ['band', 'open', 'pad', 'Env']\n__version__ = \"1.0rc1\"\n__gdal_version__ = gdal_version()\n\n# Rasterio attaches NullHandler to the 'rasterio' logger and its\n# descendents. See\n# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library\n# Applications must attach their own handlers in order to see messages.\n# See rasterio/rio/main.py for an example.\nlog = logging.getLogger(__name__)\nlog.addHandler(NullHandler())\n\n\n@ensure_env_credentialled\ndef open(fp, mode='r', driver=None, width=None, height=None, count=None,\n crs=None, transform=None, dtype=None, nodata=None, sharing=True,\n **kwargs):\n \"\"\"Open a dataset for reading or writing.\n\n The dataset may be located in a local file, in a resource located by\n a URL, or contained within a stream of bytes.\n\n In read ('r') or read/write ('r+') mode, no keyword arguments are\n required: these attributes are supplied by the opened dataset.\n\n In write ('w' or 'w+') mode, the driver, width, height, count, and dtype\n keywords are strictly required.\n\n Parameters\n ----------\n fp : str, file object or pathlib.Path object\n A filename or URL, a file object opened in binary ('rb') mode,\n or a Path object.\n mode : str, optional\n 'r' (read, the default), 'r+' (read/write), 'w' (write), or\n 'w+' (write/read).\n driver : str, optional\n A short format driver name (e.g. \"GTiff\" or \"JPEG\") or a list of\n such names (see GDAL docs at\n http://www.gdal.org/formats_list.html). In 'w' or 'w+' modes\n a single name is required. In 'r' or 'r+' modes the driver can\n usually be omitted. Registered drivers will be tried\n sequentially until a match is found. When multiple drivers are\n available for a format such as JPEG2000, one of them can be\n selected by using this keyword argument.\n width, height : int, optional\n The numbers of rows and columns of the raster dataset. Required\n in 'w' or 'w+' modes, they are ignored in 'r' or 'r+' modes.\n count : int, optional\n The count of dataset bands. Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n dtype : str or numpy dtype\n The data type for bands. For example: 'uint8' or\n ``rasterio.uint16``. 
Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n crs : str, dict, or CRS; optional\n The coordinate reference system. Required in 'w' or 'w+' modes,\n it is ignored in 'r' or 'r+' modes.\n transform : Affine instance, optional\n Affine transformation mapping the pixel space to geographic\n space. Required in 'w' or 'w+' modes, it is ignored in 'r' or\n 'r+' modes.\n nodata : int, float, or nan; optional\n Defines the pixel value to be interpreted as not valid data.\n Required in 'w' or 'w+' modes, it is ignored in 'r' or 'r+'\n modes.\n sharing : bool\n A flag that allows sharing of dataset handles. Default is\n `True`. Should be set to `False` in a multithreaded:w program.\n kwargs : optional\n These are passed to format drivers as directives for creating or\n interpreting datasets. For example: in 'w' or 'w+' modes\n a `tiled=True` keyword argument will direct the GeoTIFF format\n driver to create a tiled, rather than striped, TIFF.\n\n Returns\n -------\n A ``DatasetReader`` or ``DatasetUpdater`` object.\n\n Examples\n --------\n\n To open a GeoTIFF for reading using standard driver discovery and\n no directives:\n\n >>> import rasterio\n >>> with rasterio.open('example.tif') as dataset:\n ... print(dataset.profile)\n\n To open a JPEG2000 using only the JP2OpenJPEG driver:\n\n >>> with rasterio.open(\n ... 'example.jp2', driver='JP2OpenJPEG') as dataset:\n ... print(dataset.profile)\n\n To create a new 8-band, 16-bit unsigned, tiled, and LZW-compressed\n GeoTIFF with a global extent and 0.5 degree resolution:\n\n >>> from rasterio.transform import from_origin\n >>> with rasterio.open(\n ... 'example.tif', 'w', driver='GTiff', dtype='uint16',\n ... width=720, height=360, count=8, crs='EPSG:4326',\n ... transform=from_origin(-180.0, 90.0, 0.5, 0.5),\n ... nodata=0, tiled=True, compress='lzw') as dataset:\n ... 
dataset.write(...)\n \"\"\"\n\n if not isinstance(fp, string_types):\n if not (hasattr(fp, 'read') or hasattr(fp, 'write') or isinstance(fp, Path)):\n raise TypeError(\"invalid path or file: {0!r}\".format(fp))\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: {0!r}\".format(mode))\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: {0!r}\".format(driver))\n if dtype and not check_dtype(dtype):\n raise TypeError(\"invalid dtype: {0!r}\".format(dtype))\n if nodata is not None:\n nodata = float(nodata)\n if transform:\n transform = guard_transform(transform)\n\n # Check driver/mode blacklist.\n if driver and is_blacklisted(driver, mode):\n raise RasterioIOError(\n \"Blacklisted: file cannot be opened by \"\n \"driver '{0}' in '{1}' mode\".format(driver, mode))\n\n # Special case for file object argument.\n if mode == 'r' and hasattr(fp, 'read'):\n\n @contextmanager\n def fp_reader(fp):\n memfile = MemoryFile(fp.read())\n dataset = memfile.open()\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.close()\n\n return fp_reader(fp)\n\n elif mode in ('w', 'w+') and hasattr(fp, 'write'):\n\n @contextmanager\n def fp_writer(fp):\n memfile = MemoryFile()\n dataset = memfile.open(driver=driver, width=width, height=height,\n count=count, crs=crs, transform=transform,\n dtype=dtype, nodata=nodata, **kwargs)\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.seek(0)\n fp.write(memfile.read())\n memfile.close()\n\n return fp_writer(fp)\n\n else:\n # If a pathlib.Path instance is given, convert it to a string path.\n if isinstance(fp, Path):\n fp = str(fp)\n\n # The 'normal' filename or URL path.\n path = parse_path(fp)\n\n # Create dataset instances and pass the given env, which will\n # be taken over by the dataset's context manager if it is not\n # None.\n if mode == 'r':\n s = DatasetReader(path, driver=driver, **kwargs)\n elif mode == 'r+':\n s = get_writer_for_path(path)(path, mode, driver=driver, **kwargs)\n elif mode.startswith(\"w\"):\n s = get_writer_for_driver(driver)(path, mode, driver=driver,\n width=width, height=height,\n count=count, crs=crs,\n transform=transform,\n dtype=dtype, nodata=nodata,\n **kwargs)\n else:\n raise ValueError(\n \"mode must be one of 'r', 'r+', or 'w', not %s\" % mode)\n return s\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\n\ndef band(ds, bidx):\n \"\"\"A dataset and one or more of its bands\n\n Parameters\n ----------\n ds: dataset object\n An opened rasterio dataset object.\n bidx: int or sequence of ints\n Band number(s), index starting at 1.\n\n Returns\n -------\n rasterio.Band\n \"\"\"\n return Band(ds, bidx, set(ds.dtypes).pop(), ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"pad array and adjust affine transform matrix.\n\n Parameters\n ----------\n array: ndarray\n Numpy ndarray, for best results a 2D array\n transform: Affine transform\n transform object mapping pixel space to coordinates\n pad_width: int\n number of pixels to pad array on all four\n mode: str or function\n define the method for determining padded values\n\n Returns\n -------\n (array, transform): tuple\n Tuple of new array and affine transform\n\n Notes\n -----\n See numpy docs for details on mode and other kwargs:\n http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html\n \"\"\"\n import numpy as np\n transform = guard_transform(transform)\n padded_array = np.pad(array, pad_width, mode, **kwargs)\n padded_trans = 
list(transform)\n padded_trans[2] -= pad_width * padded_trans[0]\n padded_trans[5] -= pad_width * padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n", "path": "rasterio/__init__.py" } ]
diff --git a/CHANGES.txt b/CHANGES.txt index 4d7f55abf..8f5e2755e 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,13 +1,16 @@ Changes ======= -Next ------------------- +1.0rc1 (2018-06-27) +------------------- Bug fixes: - Internal Env() in `rasterio.open` has been replaced with an environment - ensuring decorator (#1009). + ensuring decorator (#1009). The same decorator ensures that credentials are + obtained when functions from `rasterio.shutils` are called. +- Input file arguments for all CLI commands are now parsed and validated in + a uniform manner (#999). - Local loggers have all been changed to `getLogger(__name__)` in rasterio.rio module (#1328). diff --git a/rasterio/__init__.py b/rasterio/__init__.py index a09236d42..5129bb605 100644 --- a/rasterio/__init__.py +++ b/rasterio/__init__.py @@ -42,8 +42,8 @@ def emit(self, record): import rasterio.path -__all__ = ['band', 'open', 'pad'] -__version__ = "1.0b4" +__all__ = ['band', 'open', 'pad', 'Env'] +__version__ = "1.0rc1" __gdal_version__ = gdal_version() # Rasterio attaches NullHandler to the 'rasterio' logger and its diff --git a/tests/test_env.py b/tests/test_env.py index e4fed81c7..5159d39ed 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -9,7 +9,7 @@ import rasterio from rasterio._env import del_gdal_config, get_gdal_config, set_gdal_config -from rasterio.env import Env, defenv, delenv, getenv, setenv, ensure_env +from rasterio.env import Env, defenv, delenv, getenv, setenv, ensure_env, ensure_env_credentialled from rasterio.env import GDALVersion, require_gdal_version from rasterio.errors import EnvError, RasterioIOError, GDALVersionError from rasterio.rio.main import main_group @@ -113,6 +113,24 @@ def f(): assert f() is True +def test_ensure_env_credentialled_decorator(monkeypatch, gdalenv): + """Credentialization is ensured by wrapper""" + monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'id') + monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'key') + monkeypatch.setenv('AWS_SESSION_TOKEN', 'token') + + @ensure_env_credentialled + def f(path): + return getenv() + + config = f('s3://foo/bar') + assert config['AWS_ACCESS_KEY_ID'] == 'id' + assert config['AWS_SECRET_ACCESS_KEY'] == 'key' + assert config['AWS_SESSION_TOKEN'] == 'token' + + monkeypatch.undo() + + def test_no_aws_gdal_config(gdalenv): """Trying to set AWS-specific GDAL config options fails.""" with pytest.raises(EnvError):
cloud-custodian__cloud-custodian-3075
GCP: Firewalls resource policy fails with no resource attribute 'Firewall'

When running this policy, custodian fails:

    - policies:
      - name: firewall-test
        resource: gcp.firewall

The error returned is:

    AttributeError: 'Resource' object has no attribute 'firewall'
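As a toy illustration only (the class and method names below are invented for this sketch and are not c7n or googleapiclient code), the reported AttributeError is the kind of failure you get when a client resolves API collections by attribute name and the registered component is singular while the service exposes a plural collection:

```python
class FakeComputeClient:
    """Invented stand-in for a discovery-built client exposing plural collection attributes."""

    def firewalls(self):
        return ["allow-ssh", "default-deny"]


client = FakeComputeClient()
print(getattr(client, "firewalls")())  # plural collection name resolves fine

try:
    getattr(client, "firewall")()      # singular name fails, mirroring the reported error
except AttributeError as exc:
    print(exc)
```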
[ { "content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n_gcp.query import QueryResourceManager, TypeInfo\n\nfrom c7n_gcp.provider import resources\n\n\[email protected]('vpc')\nclass Network(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'networks'\n scope_template = \"projects/{}/global/networks\"\n\n\[email protected]('subnet')\nclass Subnet(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'networks'\n enum_spec = ('aggregatedList', 'items.*.subnetworks[]', None)\n scope_template = \"projects/{}/aggregated/subnetworks\"\n\n\[email protected]('firewall')\nclass Firewall(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'firewall'\n scope_template = \"projects/{}/global/firewalls\"\n\n\[email protected]('router')\nclass Router(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'routers'\n enum_spec = ('aggregatedList', 'items.*.routers[]', None)\n scope_template = \"projects/{}/aggregated/routers\"\n\n\[email protected]('route')\nclass Route(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'routes'\n scope_template = \"projects/{}/global/routes\"\n", "path": "tools/c7n_gcp/c7n_gcp/resources/network.py" } ]
[ { "content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n_gcp.query import QueryResourceManager, TypeInfo\n\nfrom c7n_gcp.provider import resources\n\n\[email protected]('vpc')\nclass Network(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'networks'\n scope_template = \"projects/{}/global/networks\"\n\n\[email protected]('subnet')\nclass Subnet(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'networks'\n enum_spec = ('aggregatedList', 'items.*.subnetworks[]', None)\n scope_template = \"projects/{}/aggregated/subnetworks\"\n\n\[email protected]('firewall')\nclass Firewall(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'firewalls'\n\n\[email protected]('router')\nclass Router(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'routers'\n enum_spec = ('aggregatedList', 'items.*.routers[]', None)\n scope_template = \"projects/{}/aggregated/routers\"\n\n\[email protected]('route')\nclass Route(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'compute'\n version = 'v1'\n component = 'routes'\n scope_template = \"projects/{}/global/routes\"\n", "path": "tools/c7n_gcp/c7n_gcp/resources/network.py" } ]
diff --git a/tools/c7n_gcp/c7n_gcp/resources/network.py b/tools/c7n_gcp/c7n_gcp/resources/network.py index b023e0d1fe2..aa22a7502f7 100644 --- a/tools/c7n_gcp/c7n_gcp/resources/network.py +++ b/tools/c7n_gcp/c7n_gcp/resources/network.py @@ -43,8 +43,7 @@ class Firewall(QueryResourceManager): class resource_type(TypeInfo): service = 'compute' version = 'v1' - component = 'firewall' - scope_template = "projects/{}/global/firewalls" + component = 'firewalls' @resources.register('router')
huggingface__text-generation-inference-794
small typo in galactica model loading

https://github.com/huggingface/text-generation-inference/blob/1fdc88ee908beb8ae0afe17810a17b9b4d8848e2/server/text_generation_server/models/__init__.py#L92

The keyword argument at that line is written as `dtypetrust_remote_code`; it should be `trust_remote_code`.
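A small self-contained toy showing why the misspelled keyword matters; `get_model_stub` is an invented stand-in used only to demonstrate that the typo keeps `trust_remote_code` from being passed under its proper name (the corrected file later in this record passes `trust_remote_code=trust_remote_code`):

```python
def get_model_stub(model_id, revision=None, *, quantize=None, dtype=None, trust_remote_code=False):
    """Invented stand-in for the loader; only the keyword names matter here."""
    return model_id, trust_remote_code


# Correct keyword, as the report suggests:
print(get_model_stub("facebook/galactica-125m", trust_remote_code=True))

# The misspelled keyword is rejected outright by this stub; in the real call site
# the effect is that trust_remote_code is not forwarded under its proper name.
try:
    get_model_stub("facebook/galactica-125m", dtypetrust_remote_code=True)
except TypeError as exc:
    print(exc)
```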
[ { "content": "import os\nimport torch\n\nfrom loguru import logger\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.models.auto import modeling_auto\nfrom typing import Optional\n\nfrom text_generation_server.models.model import Model\nfrom text_generation_server.models.causal_lm import CausalLM\nfrom text_generation_server.models.flash_causal_lm import FlashCausalLM\nfrom text_generation_server.models.bloom import BLOOMSharded\nfrom text_generation_server.models.mpt import MPTSharded\nfrom text_generation_server.models.seq2seq_lm import Seq2SeqLM\nfrom text_generation_server.models.rw import RW\nfrom text_generation_server.models.opt import OPTSharded\nfrom text_generation_server.models.galactica import GalacticaSharded\nfrom text_generation_server.models.santacoder import SantaCoder\nfrom text_generation_server.models.t5 import T5Sharded\nfrom text_generation_server.models.gpt_neox import GPTNeoxSharded\n\n# The flag below controls whether to allow TF32 on matmul. This flag defaults to False\n# in PyTorch 1.12 and later.\ntorch.backends.cuda.matmul.allow_tf32 = True\n\n# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.\ntorch.backends.cudnn.allow_tf32 = True\n\n# Disable gradients\ntorch.set_grad_enabled(False)\n\n__all__ = [\n \"Model\",\n \"BLOOMSharded\",\n \"CausalLM\",\n \"FlashCausalLM\",\n \"GalacticaSharded\",\n \"Seq2SeqLM\",\n \"SantaCoder\",\n \"OPTSharded\",\n \"T5Sharded\",\n \"get_model\",\n]\n\nFLASH_ATT_ERROR_MESSAGE = \"{} requires Flash Attention enabled models.\"\n\nFLASH_ATTENTION = True\ntry:\n from text_generation_server.models.flash_rw import FlashRWSharded\n from text_generation_server.models.flash_neox import FlashNeoXSharded\n from text_generation_server.models.flash_llama import (\n FlashLlama,\n )\n from text_generation_server.models.flash_santacoder import (\n FlashSantacoderSharded,\n )\n\nexcept ImportError as e:\n logger.warning(f\"Could not import Flash Attention enabled models: {e}\")\n FLASH_ATTENTION = False\n\nif FLASH_ATTENTION:\n __all__.append(FlashNeoXSharded)\n __all__.append(FlashRWSharded)\n __all__.append(FlashSantacoderSharded)\n __all__.append(FlashLlama)\n\n\ndef get_model(\n model_id: str,\n revision: Optional[str],\n sharded: bool,\n quantize: Optional[str],\n dtype: Optional[str],\n trust_remote_code: bool,\n) -> Model:\n if dtype is None:\n dtype = torch.float16\n elif dtype == \"float16\":\n dtype = torch.float16\n elif dtype == \"bfloat16\":\n dtype = torch.bfloat16\n else:\n raise RuntimeError(f\"Unknown dtype {dtype}\")\n\n if \"facebook/galactica\" in model_id:\n return GalacticaSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n dtypetrust_remote_code=trust_remote_code,\n )\n\n if model_id.startswith(\"bigcode/\"):\n if FLASH_ATTENTION:\n return FlashSantacoderSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(\n FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Santacoder\")\n )\n else:\n return SantaCoder(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n config_dict, _ = PretrainedConfig.get_config_dict(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n model_type = config_dict[\"model_type\"]\n\n if model_type == \"gpt_bigcode\":\n if FLASH_ATTENTION:\n return FlashSantacoderSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n 
trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(\n FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Santacoder\")\n )\n else:\n return SantaCoder(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if model_type == \"bloom\":\n return BLOOMSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif model_type == \"mpt\":\n return MPTSharded(\n model_id, revision, quantize=quantize, trust_remote_code=trust_remote_code\n )\n\n elif model_type == \"gpt_neox\":\n if FLASH_ATTENTION:\n return FlashNeoXSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n return GPTNeoxSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n else:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"llama\":\n if FLASH_ATTENTION:\n return FlashLlama(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Llama\"))\n else:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if model_type in [\"RefinedWeb\", \"RefinedWebModel\", \"falcon\"]:\n if sharded:\n if FLASH_ATTENTION:\n if config_dict.get(\"alibi\", False):\n raise NotImplementedError(\"sharded is not supported for this model\")\n return FlashRWSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(f\"Sharded Falcon\"))\n else:\n if FLASH_ATTENTION and not config_dict.get(\"alibi\", False):\n return FlashRWSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n else:\n return RW(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"opt\":\n return OPTSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"t5\":\n return T5Sharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if sharded:\n raise ValueError(\"sharded is not supported for AutoModel\")\n if quantize == \"gptq\":\n raise ValueError(\n \"gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`\"\n )\n elif (quantize == \"bitsandbytes-fp4\") or (quantize == \"bitsandbytes-nf4\"):\n raise ValueError(\n \"4bit quantization is not supported for AutoModel\"\n )\n if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n return Seq2SeqLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n auto_map = config_dict.get(\"auto_map\", None)\n if trust_remote_code and auto_map is not None:\n if \"AutoModelForCausalLM\" in auto_map.keys():\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n 
dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n if \"AutoModelForSeq2SeqLM\" in auto_map.keys():\n return Seq2SeqLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n raise ValueError(f\"Unsupported model type {model_type}\")\n", "path": "server/text_generation_server/models/__init__.py" } ]
[ { "content": "import os\nimport torch\n\nfrom loguru import logger\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.models.auto import modeling_auto\nfrom typing import Optional\n\nfrom text_generation_server.models.model import Model\nfrom text_generation_server.models.causal_lm import CausalLM\nfrom text_generation_server.models.flash_causal_lm import FlashCausalLM\nfrom text_generation_server.models.bloom import BLOOMSharded\nfrom text_generation_server.models.mpt import MPTSharded\nfrom text_generation_server.models.seq2seq_lm import Seq2SeqLM\nfrom text_generation_server.models.rw import RW\nfrom text_generation_server.models.opt import OPTSharded\nfrom text_generation_server.models.galactica import GalacticaSharded\nfrom text_generation_server.models.santacoder import SantaCoder\nfrom text_generation_server.models.t5 import T5Sharded\nfrom text_generation_server.models.gpt_neox import GPTNeoxSharded\n\n# The flag below controls whether to allow TF32 on matmul. This flag defaults to False\n# in PyTorch 1.12 and later.\ntorch.backends.cuda.matmul.allow_tf32 = True\n\n# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.\ntorch.backends.cudnn.allow_tf32 = True\n\n# Disable gradients\ntorch.set_grad_enabled(False)\n\n__all__ = [\n \"Model\",\n \"BLOOMSharded\",\n \"CausalLM\",\n \"FlashCausalLM\",\n \"GalacticaSharded\",\n \"Seq2SeqLM\",\n \"SantaCoder\",\n \"OPTSharded\",\n \"T5Sharded\",\n \"get_model\",\n]\n\nFLASH_ATT_ERROR_MESSAGE = \"{} requires Flash Attention enabled models.\"\n\nFLASH_ATTENTION = True\ntry:\n from text_generation_server.models.flash_rw import FlashRWSharded\n from text_generation_server.models.flash_neox import FlashNeoXSharded\n from text_generation_server.models.flash_llama import (\n FlashLlama,\n )\n from text_generation_server.models.flash_santacoder import (\n FlashSantacoderSharded,\n )\n\nexcept ImportError as e:\n logger.warning(f\"Could not import Flash Attention enabled models: {e}\")\n FLASH_ATTENTION = False\n\nif FLASH_ATTENTION:\n __all__.append(FlashNeoXSharded)\n __all__.append(FlashRWSharded)\n __all__.append(FlashSantacoderSharded)\n __all__.append(FlashLlama)\n\n\ndef get_model(\n model_id: str,\n revision: Optional[str],\n sharded: bool,\n quantize: Optional[str],\n dtype: Optional[str],\n trust_remote_code: bool,\n) -> Model:\n if dtype is None:\n dtype = torch.float16\n elif dtype == \"float16\":\n dtype = torch.float16\n elif dtype == \"bfloat16\":\n dtype = torch.bfloat16\n else:\n raise RuntimeError(f\"Unknown dtype {dtype}\")\n\n if \"facebook/galactica\" in model_id:\n return GalacticaSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if model_id.startswith(\"bigcode/\"):\n if FLASH_ATTENTION:\n return FlashSantacoderSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(\n FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Santacoder\")\n )\n else:\n return SantaCoder(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n config_dict, _ = PretrainedConfig.get_config_dict(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n model_type = config_dict[\"model_type\"]\n\n if model_type == \"gpt_bigcode\":\n if FLASH_ATTENTION:\n return FlashSantacoderSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n 
trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(\n FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Santacoder\")\n )\n else:\n return SantaCoder(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if model_type == \"bloom\":\n return BLOOMSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif model_type == \"mpt\":\n return MPTSharded(\n model_id, revision, quantize=quantize, trust_remote_code=trust_remote_code\n )\n\n elif model_type == \"gpt_neox\":\n if FLASH_ATTENTION:\n return FlashNeoXSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n return GPTNeoxSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n else:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"llama\":\n if FLASH_ATTENTION:\n return FlashLlama(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n elif sharded:\n raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(\"Sharded Llama\"))\n else:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if model_type in [\"RefinedWeb\", \"RefinedWebModel\", \"falcon\"]:\n if sharded:\n if FLASH_ATTENTION:\n if config_dict.get(\"alibi\", False):\n raise NotImplementedError(\"sharded is not supported for this model\")\n return FlashRWSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(f\"Sharded Falcon\"))\n else:\n if FLASH_ATTENTION and not config_dict.get(\"alibi\", False):\n return FlashRWSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n else:\n return RW(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"opt\":\n return OPTSharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n elif model_type == \"t5\":\n return T5Sharded(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n if sharded:\n raise ValueError(\"sharded is not supported for AutoModel\")\n if quantize == \"gptq\":\n raise ValueError(\n \"gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`\"\n )\n elif (quantize == \"bitsandbytes-fp4\") or (quantize == \"bitsandbytes-nf4\"):\n raise ValueError(\n \"4bit quantization is not supported for AutoModel\"\n )\n if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n return Seq2SeqLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n auto_map = config_dict.get(\"auto_map\", None)\n if trust_remote_code and auto_map is not None:\n if \"AutoModelForCausalLM\" in auto_map.keys():\n return CausalLM(\n model_id,\n revision,\n quantize=quantize,\n 
dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n if \"AutoModelForSeq2SeqLM\" in auto_map.keys():\n return Seq2SeqLM(\n model_id,\n revision,\n quantize=quantize,\n dtype=dtype,\n trust_remote_code=trust_remote_code,\n )\n\n raise ValueError(f\"Unsupported model type {model_type}\")\n", "path": "server/text_generation_server/models/__init__.py" } ]
diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py index 71efcab745b..621652e8b62 100644 --- a/server/text_generation_server/models/__init__.py +++ b/server/text_generation_server/models/__init__.py @@ -89,7 +89,7 @@ def get_model( revision, quantize=quantize, dtype=dtype, - dtypetrust_remote_code=trust_remote_code, + trust_remote_code=trust_remote_code, ) if model_id.startswith("bigcode/"):
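The diff above only corrects a mangled keyword argument (`dtypetrust_remote_code` → `trust_remote_code`) at what the hunk context suggests is the `GalacticaSharded` call inside `get_model()`. As a minimal sketch of why the original call fails at runtime, here is a hypothetical, simplified stand-in for that constructor (the real signature is more involved):

```python
class GalacticaSharded:
    # Hypothetical, simplified constructor mirroring the call site in get_model().
    def __init__(self, model_id, revision, quantize=None, dtype=None,
                 trust_remote_code=False):
        self.model_id = model_id
        self.dtype = dtype
        self.trust_remote_code = trust_remote_code


# The pre-fix call passed the fused keyword, so Python rejects it immediately:
try:
    GalacticaSharded("facebook/galactica-125m", None, quantize=None,
                     dtype="float16", dtypetrust_remote_code=False)
except TypeError as exc:
    print(exc)  # ... got an unexpected keyword argument 'dtypetrust_remote_code'
```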
dbt-labs__dbt-core-1743
Support for Snowflake Secure Views

### Adding support for Secure Views in Snowflake
When choosing the type of materialization, it would be beneficial to be able to mark a view as secure, e.g. with `{{ config(materialized='secure-view') }}`.

### Current work-around
The current solution for Snowflake secure views is to run a post-hook that sets the targeted view as secure, for example: `alter view sv_mySecureTest set secure;`. This works, but every view that needs to be secured has to be added to the post-hook event.

### Affects only Snowflake
This feature is specific to the Snowflake cloud data warehouse. [https://docs.snowflake.net/manuals/user-guide/views-secure.html](url)

### This will help dbt Snowflake developers and non-developers
When creating a secure view in Snowflake, a developer can use two syntax commands:

1. `CREATE OR REPLACE SECURE VIEW...`
2. `ALTER VIEW <view_name> SET SECURE`

The first method lets a non-dbt user see the secure declaration rendered as part of the DDL; the second statement is appended to the end of the generated DDL, where it may be overlooked by developers unfamiliar with Snowflake syntax, causing possible security issues by giving read-only roles in Snowflake unauthorized access to the view DDL.
[ { "content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(cls, table, manifest):\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py" } ]
[ { "content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(cls, table, manifest):\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py" } ]
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py index 31600a8a236..4e3d3d7793e 100644 --- a/plugins/snowflake/dbt/adapters/snowflake/impl.py +++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py @@ -9,7 +9,7 @@ class SnowflakeAdapter(SQLAdapter): ConnectionManager = SnowflakeConnectionManager AdapterSpecificConfigs = frozenset( - {"transient", "cluster_by", "automatic_clustering"} + {"transient", "cluster_by", "automatic_clustering", "secure"} ) @classmethod diff --git a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql index cb3b67efde2..ab3af5c4ceb 100644 --- a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql +++ b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql @@ -35,7 +35,10 @@ {% endmacro %} {% macro snowflake__create_view_as(relation, sql) -%} - create or replace view {{ relation }} as ( + {%- set secure = config.get('secure', default=false) -%} + create or replace {% if secure -%} + secure + {%- endif %} view {{ relation }} as ( {{ sql }} ); {% endmacro %}
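The fix has two parts: `secure` is added to the adapter's `AdapterSpecificConfigs`, and the `snowflake__create_view_as` macro conditionally injects the `secure` keyword. A rough way to see what the macro change renders, using plain `jinja2` with a simplified template (the whitespace handling and the relation/sql values here are illustrative assumptions):

```python
from jinja2 import Template

# Simplified stand-in for the patched snowflake__create_view_as macro body.
create_view_as = Template(
    "create or replace {% if secure %}secure {% endif %}view {{ relation }} as (\n"
    "    {{ sql }}\n"
    ");"
)

print(create_view_as.render(
    relation="analytics.sv_my_secure_view",
    sql="select * from analytics.raw_events",
    secure=True,  # in dbt this would presumably come from config.get('secure')
))
# create or replace secure view analytics.sv_my_secure_view as (
#     select * from analytics.raw_events
# );
```

A model would then presumably opt in with something like `{{ config(materialized='view', secure=true) }}` in its SQL file.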
django-extensions__django-extensions-1654
RemovedInDjango41Warning: 'django_extensions' defines default_app_config

The following warning is emitted when using django-extensions along with Django 3.2:

```
django.utils.deprecation.RemovedInDjango41Warning: 'django_extensions' defines default_app_config = 'django_extensions.apps.DjangoExtensionsConfig'. Django now detects this configuration automatically. You can remove default_app_config.
```

I suppose it is related to the change introduced by Django 3.2: https://docs.djangoproject.com/en/3.2/releases/3.2/#what-s-new-in-django-3-2

Environment:
* python 3.8
* django 3.2
[ { "content": "# -*- coding: utf-8 -*-\nVERSION = (3, 1, 3, 'dev')\n\n\ndef get_version(version):\n \"\"\"Dynamically calculate the version based on VERSION tuple.\"\"\"\n if len(version) > 2 and version[2] is not None:\n if len(version) == 4:\n str_version = \"%s.%s.%s.%s\" % version\n elif isinstance(version[2], int):\n str_version = \"%s.%s.%s\" % version[:3]\n else:\n str_version = \"%s.%s_%s\" % version[:3]\n else:\n str_version = \"%s.%s\" % version[:2]\n\n return str_version\n\n\n__version__ = get_version(VERSION)\n\ndefault_app_config = 'django_extensions.apps.DjangoExtensionsConfig'\n", "path": "django_extensions/__init__.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nVERSION = (3, 1, 3, 'dev')\n\n\ndef get_version(version):\n \"\"\"Dynamically calculate the version based on VERSION tuple.\"\"\"\n if len(version) > 2 and version[2] is not None:\n if len(version) == 4:\n str_version = \"%s.%s.%s.%s\" % version\n elif isinstance(version[2], int):\n str_version = \"%s.%s.%s\" % version[:3]\n else:\n str_version = \"%s.%s_%s\" % version[:3]\n else:\n str_version = \"%s.%s\" % version[:2]\n\n return str_version\n\n\n__version__ = get_version(VERSION)\n\ntry:\n import django\n\n if django.VERSION < (3, 2):\n default_app_config = 'django_extensions.apps.DjangoExtensionsConfig'\nexcept ModuleNotFoundError:\n # this part is useful for allow setup.py to be used for version checks\n pass\n", "path": "django_extensions/__init__.py" } ]
diff --git a/django_extensions/__init__.py b/django_extensions/__init__.py index 488c8e0d6..8e1d2201a 100644 --- a/django_extensions/__init__.py +++ b/django_extensions/__init__.py @@ -19,4 +19,11 @@ def get_version(version): __version__ = get_version(VERSION) -default_app_config = 'django_extensions.apps.DjangoExtensionsConfig' +try: + import django + + if django.VERSION < (3, 2): + default_app_config = 'django_extensions.apps.DjangoExtensionsConfig' +except ModuleNotFoundError: + # this part is useful for allow setup.py to be used for version checks + pass diff --git a/tests/management/commands/test_notes.py b/tests/management/commands/test_notes.py index cfe7536e6..8536743c9 100644 --- a/tests/management/commands/test_notes.py +++ b/tests/management/commands/test_notes.py @@ -8,7 +8,7 @@ def test_without_args(capsys, settings): call_command('notes') out, err = capsys.readouterr() - assert 'tests/testapp/__init__.py:\n * [ 4] TODO this is a test todo\n\n' in out + assert 'tests/testapp/__init__.py:\n * [ 8] TODO this is a test todo\n\n' in out def test_with_utf8(capsys, settings): diff --git a/tests/test_compatibility.py b/tests/test_compatibility.py new file mode 100644 index 000000000..703fb8ee1 --- /dev/null +++ b/tests/test_compatibility.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +"""Test Compatility between different django versions""" +import django +import pytest + +import django_extensions + + +class TestDefaultAppConfigDefinition: + @pytest.mark.skipif(django.VERSION < (3, 2), reason='app config is automatically defined by django') + def test_app_config_not_defined(self): + assert hasattr(django_extensions, 'default_app_config') is False + + @pytest.mark.skipif(django.VERSION >= (3, 2), reason='app config is not automatically defined by django') + def test_app_config_defined(self): + assert hasattr(django_extensions, 'default_app_config') is True diff --git a/tests/testapp/__init__.py b/tests/testapp/__init__.py index 0599a0784..b889f41b7 100644 --- a/tests/testapp/__init__.py +++ b/tests/testapp/__init__.py @@ -1,4 +1,8 @@ # -*- coding: utf-8 -*- -default_app_config = 'tests.testapp.apps.TestAppConfig' +import django + + +if django.VERSION < (3, 2): + default_app_config = 'tests.testapp.apps.TestAppConfig' # TODO: this is a test todo
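The fix simply guards the module-level `default_app_config` behind a Django version check (and keeps the module importable when Django is absent, e.g. for `setup.py`). The same pattern for a hypothetical third-party app named `myapp` would look like this:

```python
# Version-guarded app config declaration; Django 3.2+ discovers the AppConfig
# automatically, so setting the attribute there would trigger the deprecation warning.
try:
    import django

    if django.VERSION < (3, 2):
        default_app_config = 'myapp.apps.MyAppConfig'  # hypothetical dotted path
except ModuleNotFoundError:
    # Allows importing the package (e.g. from setup.py) without Django installed.
    pass
```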
fidals__shopelectro-200
SE yml fix delivery 3k -> 5k

[origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml)

We suddenly discovered that the feed reports incorrect delivery info (in the <sales_notes> tag). Fix it to 5k.
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 3000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n", "path": "shopelectro/settings/base.py" } ]
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n", "path": "shopelectro/settings/base.py" } ]
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py index 0ed86524..3f3942b0 100644 --- a/shopelectro/settings/base.py +++ b/shopelectro/settings/base.py @@ -268,7 +268,7 @@ 'cps_phone': '+78124163200', 'cps_email': '[email protected]', 'local_delivery_cost': 300, - 'local_delivery_cost_threshold': 3000, + 'local_delivery_cost_threshold': 5000, } # used in data-migrations and tests
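The change itself is a single constant bump (`local_delivery_cost_threshold`: 3000 → 5000). The record does not show where the threshold is consumed, so the helper below is only a hypothetical illustration of how such a setting is typically applied when computing delivery cost:

```python
# Hypothetical usage sketch; the real cart/feed logic lives elsewhere in the project.
SHOP = {'local_delivery_cost': 300, 'local_delivery_cost_threshold': 5000}


def local_delivery_cost(order_total: int) -> int:
    """Local delivery is assumed free once the order total reaches the threshold."""
    if order_total >= SHOP['local_delivery_cost_threshold']:
        return 0
    return SHOP['local_delivery_cost']


assert local_delivery_cost(4999) == 300
assert local_delivery_cost(5000) == 0
```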
MycroftAI__mycroft-core-275
Base media skill is loaded as a normal skill

At the moment, the load_skills function tries to load the media skill as if it were a normal skill.
[ { "content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport imp\nimport time\n\nimport abc\nimport os.path\nimport re\nfrom adapt.intent import Intent\nfrom os.path import join, dirname, splitext, isdir\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nPRIMARY_SKILLS = ['intent', 'wake']\nBLACKLISTED_SKILLS = [\"send_sms\"]\nSKILLS_BASEDIR = dirname(__file__)\nTHIRD_PARTY_SKILLS_DIR = \"/opt/mycroft/third_party\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': entity, 'end': vocab_type}))\n for alias in parts[1:]:\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': alias, 'end': vocab_type,\n 'alias_of': entity}))\n\n\ndef load_regex_from_file(path, emitter):\n if(path.endswith('.rx')):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef create_intent_envelope(intent):\n return Message(None, metadata=intent.__dict__, context={})\n\n\ndef open_intent_envelope(message):\n intent_dict = message.metadata\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill.initialize()\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (not isdir(location) 
or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_BASEDIR):\n skills = get_skills(skills_root)\n for skill in skills:\n if skill['name'] in PRIMARY_SKILLS:\n load_skill(skill, emitter)\n\n for skill in skills:\n if (skill['name'] not in PRIMARY_SKILLS and\n skill['name'] not in BLACKLISTED_SKILLS):\n load_skill(skill, emitter)\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n config = ConfigurationManager.get()\n self.config = config.get(name)\n self.config_core = config.get('core')\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n\n @property\n def location(self):\n return self.config_core.get('location')\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get('stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for name in self.registered_intents:\n self.emitter.emit(\n Message(\"detach_intent\", metadata={\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n intent_message = create_intent_envelope(intent_parser)\n intent_message.message_type = \"register_intent\"\n self.emitter.emit(intent_message)\n self.registered_intents.append(intent_parser.name)\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n self.emitter.on(intent_parser.name, receive_handler)\n\n def register_vocabulary(self, entity, entity_type):\n self.emitter.emit(\n Message('register_vocab',\n metadata={'start': entity, 'end': entity_type}))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(\n Message('register_vocab', metadata={'regex': regex_str}))\n\n def speak(self, utterance):\n self.emitter.emit(Message(\"speak\", metadata={'utterance': utterance}))\n\n def speak_dialog(self, key, data={}):\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n self.dialog_renderer = DialogLoader().load(\n join(root_directory, 'dialog', self.lang))\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n\n def load_vocab_files(self, vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n\n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, 
self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n", "path": "mycroft/skills/core.py" } ]
[ { "content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport imp\nimport time\n\nimport abc\nimport os.path\nimport re\nfrom adapt.intent import Intent\nfrom os.path import join, dirname, splitext, isdir\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nPRIMARY_SKILLS = ['intent', 'wake']\nBLACKLISTED_SKILLS = [\"send_sms\", \"media\"]\nSKILLS_BASEDIR = dirname(__file__)\nTHIRD_PARTY_SKILLS_DIR = \"/opt/mycroft/third_party\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': entity, 'end': vocab_type}))\n for alias in parts[1:]:\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': alias, 'end': vocab_type,\n 'alias_of': entity}))\n\n\ndef load_regex_from_file(path, emitter):\n if(path.endswith('.rx')):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef create_intent_envelope(intent):\n return Message(None, metadata=intent.__dict__, context={})\n\n\ndef open_intent_envelope(message):\n intent_dict = message.metadata\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill.initialize()\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (not 
isdir(location) or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_BASEDIR):\n skills = get_skills(skills_root)\n for skill in skills:\n if skill['name'] in PRIMARY_SKILLS:\n load_skill(skill, emitter)\n\n for skill in skills:\n if (skill['name'] not in PRIMARY_SKILLS and\n skill['name'] not in BLACKLISTED_SKILLS):\n load_skill(skill, emitter)\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n config = ConfigurationManager.get()\n self.config = config.get(name)\n self.config_core = config.get('core')\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n\n @property\n def location(self):\n return self.config_core.get('location')\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get('stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for name in self.registered_intents:\n self.emitter.emit(\n Message(\"detach_intent\", metadata={\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n intent_message = create_intent_envelope(intent_parser)\n intent_message.message_type = \"register_intent\"\n self.emitter.emit(intent_message)\n self.registered_intents.append(intent_parser.name)\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n self.emitter.on(intent_parser.name, receive_handler)\n\n def register_vocabulary(self, entity, entity_type):\n self.emitter.emit(\n Message('register_vocab',\n metadata={'start': entity, 'end': entity_type}))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(\n Message('register_vocab', metadata={'regex': regex_str}))\n\n def speak(self, utterance):\n self.emitter.emit(Message(\"speak\", metadata={'utterance': utterance}))\n\n def speak_dialog(self, key, data={}):\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n self.dialog_renderer = DialogLoader().load(\n join(root_directory, 'dialog', self.lang))\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n\n def load_vocab_files(self, vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n\n def load_regex_files(self, regex_dir):\n 
load_regex(regex_dir, self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n", "path": "mycroft/skills/core.py" } ]
diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py index a61dce2f4d10..af32d10d6c4a 100644 --- a/mycroft/skills/core.py +++ b/mycroft/skills/core.py @@ -35,7 +35,7 @@ __author__ = 'seanfitz' PRIMARY_SKILLS = ['intent', 'wake'] -BLACKLISTED_SKILLS = ["send_sms"] +BLACKLISTED_SKILLS = ["send_sms", "media"] SKILLS_BASEDIR = dirname(__file__) THIRD_PARTY_SKILLS_DIR = "/opt/mycroft/third_party"
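The one-line fix adds `"media"` to `BLACKLISTED_SKILLS`, which `load_skills()` consults when deciding what to load. A small self-contained sketch of that filtering logic (the skill names used here are illustrative):

```python
PRIMARY_SKILLS = ['intent', 'wake']
BLACKLISTED_SKILLS = ['send_sms', 'media']


def skills_to_load(discovered):
    # Mirrors the two passes in load_skills(): primary skills are always loaded,
    # everything else is loaded unless it is blacklisted.
    primary = [s for s in discovered if s in PRIMARY_SKILLS]
    rest = [s for s in discovered
            if s not in PRIMARY_SKILLS and s not in BLACKLISTED_SKILLS]
    return primary + rest


assert 'media' not in skills_to_load(['intent', 'media', 'send_sms', 'weather'])
assert skills_to_load(['intent', 'media', 'weather']) == ['intent', 'weather']
```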
mdn__kuma-5855
Escalated number of errors from Google Search Console: Breadcrumbs

[Google Search Console](https://search.google.com/search-console/breadcrumbs/drilldown?resource_id=https%3A%2F%2Fdeveloper.mozilla.org%2F&item_key=CgwICRoIcG9zaXRpb24QAxgP&hl=en) emailed us about a big increase in indexing "errors" about breadcrumbs.

<img width="1174" alt="Screen Shot 2019-09-20 at 1 47 54 PM" src="https://user-images.githubusercontent.com/26739/65347578-4a118c80-dbad-11e9-8bda-8df0bd7871de.png">

The code that produces our breadcrumbs (on the Wiki) [hasn't changed in years](https://github.com/mozilla/kuma/blame/master/kuma/wiki/jinja2/wiki/includes/document_macros.html).
[ { "content": "# -*- coding: utf-8 -*-\nimport difflib\nimport json\nimport re\n\nimport jinja2\nimport six\nfrom constance import config\nfrom cssselect.parser import SelectorSyntaxError\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.template import loader\nfrom django.utils import lru_cache\nfrom django.utils.html import conditional_escape\nfrom django.utils.six.moves.urllib.parse import urlsplit, urlunparse\nfrom django.utils.translation import ugettext\nfrom django_jinja import library\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import order_params, urlparams\nfrom kuma.core.utils import safer_pyquery as pq\n\nfrom ..constants import DIFF_WRAP_COLUMN\nfrom ..content import clean_content\nfrom ..utils import tidy_content\n\n\ndef get_compare_url(doc, from_id, to_id):\n return order_params(urlparams(\n reverse('wiki.compare_revisions', args=[doc.slug], locale=doc.locale),\n **{'from': from_id, 'to': to_id}\n ))\n\n\[email protected]\ndef bugize_text(content):\n content = jinja2.escape(content)\n regex = re.compile(r'(bug)\\s+#?(\\d+)', re.IGNORECASE)\n content = regex.sub(\n jinja2.Markup('<a href=\"https://bugzilla.mozilla.org/'\n 'show_bug.cgi?id=\\\\2\" '\n 'target=\"_blank\" rel=\"noopener\">\\\\1 \\\\2</a>'),\n content)\n return content\n\n\[email protected]_function\ndef format_comment(rev, previous_revision=None, load_previous=True):\n \"\"\"\n Format comment for HTML display, with Bugzilla links and slug changes.\n\n Keyword Arguments:\n rev - The revision\n previous_revision - The previous revision (default None)\n load_previous - Try loading previous revision if None (default True)\n \"\"\"\n if previous_revision is None and load_previous:\n previous_revision = rev.previous\n comment = bugize_text(rev.comment if rev.comment else \"\")\n\n # If a page move, say so\n if previous_revision and previous_revision.slug != rev.slug:\n comment += jinja2.Markup(\n '<span class=\"slug-change\">'\n '<span>%s</span>'\n ' <i class=\"icon-long-arrow-right\" aria-hidden=\"true\"></i> '\n '<span>%s</span></span>') % (previous_revision.slug, rev.slug)\n\n return comment\n\n\[email protected]_function\ndef revisions_unified_diff(from_revision, to_revision):\n \"\"\"\n Given the two revisions generate a diff between their tidied\n content in the unified diff format.\n \"\"\"\n if from_revision is None or to_revision is None:\n return \"Diff is unavailable.\"\n\n fromfile = '[%s] #%s' % (from_revision.document.locale, from_revision.id)\n tofile = '[%s] #%s' % (to_revision.document.locale, to_revision.id)\n\n tidy_from = from_revision.get_tidied_content()\n tidy_to = to_revision.get_tidied_content()\n\n return u'\\n'.join(difflib.unified_diff(\n tidy_from.splitlines(),\n tidy_to.splitlines(),\n fromfile=fromfile,\n tofile=tofile,\n ))\n\n\[email protected]_function\ndef diff_table(content_from, content_to, prev_id, curr_id, tidy=False):\n \"\"\"\n Creates an HTML diff of the passed in content_from and content_to.\n \"\"\"\n if tidy:\n content_from, errors = tidy_content(content_from)\n content_to, errors = tidy_content(content_to)\n\n html_diff = difflib.HtmlDiff(wrapcolumn=DIFF_WRAP_COLUMN)\n try:\n diff = html_diff.make_table(content_from.splitlines(),\n content_to.splitlines(),\n ugettext('Revision %s') % prev_id,\n ugettext('Revision %s') % curr_id,\n context=True,\n numlines=config.DIFF_CONTEXT_LINES)\n except RuntimeError:\n # some diffs hit a max recursion error\n message = ugettext(u'There was an error 
generating the content.')\n diff = '<div class=\"warning\"><p>%s</p></div>' % message\n return jinja2.Markup(diff)\n\n\[email protected]_function\ndef tag_diff_table(prev_tags, curr_tags, prev_id, curr_id):\n html_diff = difflib.HtmlDiff(wrapcolumn=DIFF_WRAP_COLUMN)\n\n diff = html_diff.make_table([prev_tags], [curr_tags],\n ugettext('Revision %s') % prev_id,\n ugettext('Revision %s') % curr_id)\n\n # Simple formatting update: 784877\n diff = diff.replace('\",', '\"<br />').replace('<td', '<td valign=\"top\"')\n return jinja2.Markup(diff)\n\n\[email protected]_function\ndef colorize_diff(diff):\n # we're doing something horrible here because this will show up\n # in feed reader and other clients that don't load CSS files\n diff = diff.replace('<span class=\"diff_add\"', '<span class=\"diff_add\" '\n 'style=\"background-color: #afa; text-decoration: none;\"')\n diff = diff.replace('<span class=\"diff_sub\"', '<span class=\"diff_sub\" '\n 'style=\"background-color: #faa; text-decoration: none;\"')\n diff = diff.replace('<span class=\"diff_chg\"', '<span class=\"diff_chg\" '\n 'style=\"background-color: #fe0; text-decoration: none;\"')\n return diff\n\n\[email protected]\ndef wiki_bleach(val):\n return jinja2.Markup(clean_content(val))\n\n\[email protected]\ndef selector_content_find(document, selector):\n \"\"\"\n Provided a selector, returns the relevant content from the document\n \"\"\"\n content = ''\n try:\n page = pq(document.rendered_html)\n except ValueError:\n # pass errors during construction\n pass\n try:\n content = page.find(selector).text()\n except SelectorSyntaxError:\n # pass errors during find/select\n pass\n return content\n\n\ndef _recursive_escape(value, esc=conditional_escape):\n \"\"\"\n Recursively escapes strings in an object.\n\n Traverses dict, list and tuples. These are the data structures supported\n by the JSON encoder.\n \"\"\"\n if isinstance(value, dict):\n return type(value)((esc(k), _recursive_escape(v))\n for (k, v) in value.iteritems())\n elif isinstance(value, (list, tuple)):\n return type(value)(_recursive_escape(v) for v in value)\n elif isinstance(value, six.string_types):\n return esc(value)\n elif isinstance(value, (int, long, float)) or value in (True, False, None):\n return value\n # We've exhausted all the types acceptable by the default JSON encoder.\n # Django's improved JSON encoder handles a few other types, all of which\n # are represented by strings. 
For these types, we apply JSON encoding\n # immediately and then escape the result.\n return esc(DjangoJSONEncoder().default(value))\n\n\[email protected]\ndef tojson(value):\n \"\"\"\n Returns the JSON representation of the value.\n \"\"\"\n try:\n # If value contains custom subclasses of int, str, datetime, etc.\n # arbitrary exceptions may be raised during escaping or serialization.\n result = json.dumps(_recursive_escape(value), cls=DjangoJSONEncoder)\n except Exception:\n return ''\n return jinja2.Markup(result)\n\n\[email protected]\ndef absolutify(url, for_wiki_site=False):\n \"\"\"Joins settings.SITE_URL with a URL path.\"\"\"\n if url.startswith('http'):\n return url\n\n if for_wiki_site:\n site_url = settings.WIKI_SITE_URL\n else:\n site_url = settings.SITE_URL\n\n site = urlsplit(site_url)\n parts = urlsplit(url)\n scheme = site.scheme\n netloc = site.netloc\n path = parts.path\n query = parts.query\n fragment = parts.fragment\n\n if path == '':\n path = '/'\n\n return urlunparse([scheme, netloc, path, None, query, fragment])\n\n\[email protected]_function\ndef wiki_url(path):\n \"\"\"\n Create a URL pointing to Kuma.\n Look for a wiki page in the current locale, or default to given path\n \"\"\"\n if '#' in path:\n slug, fragment = path.split('#', 1)\n else:\n slug = path\n fragment = ''\n new_path = reverse('wiki.document', args=[slug])\n if fragment:\n new_path += '#' + fragment\n return new_path\n\n\[email protected]_function\n@lru_cache.lru_cache()\ndef include_svg(path, title=None, title_id=None):\n \"\"\"\n Embded an SVG file by path, optionally changing the title,\n and adding an id\n \"\"\"\n svg = loader.get_template(path).render()\n if (title):\n svg_parsed = pq(svg, namespaces={'svg': 'http://www.w3.org/2000/svg'})\n svg_parsed('svg|title')[0].text = title\n if (title_id):\n svg_parsed('svg|title').attr['id'] = title_id\n svg_out = svg_parsed.outerHtml()\n else:\n svg_out = svg\n return jinja2.Markup(svg_out)\n", "path": "kuma/wiki/templatetags/jinja_helpers.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nimport difflib\nimport json\nimport re\n\nimport jinja2\nimport six\nfrom constance import config\nfrom cssselect.parser import SelectorSyntaxError\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.template import loader\nfrom django.utils import lru_cache\nfrom django.utils.html import conditional_escape\nfrom django.utils.six.moves.urllib.parse import urlsplit, urlunparse\nfrom django.utils.translation import ugettext\nfrom django_jinja import library\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import order_params, urlparams\nfrom kuma.core.utils import safer_pyquery as pq\n\nfrom ..constants import DIFF_WRAP_COLUMN\nfrom ..content import clean_content\nfrom ..utils import tidy_content\n\n\ndef get_compare_url(doc, from_id, to_id):\n return order_params(urlparams(\n reverse('wiki.compare_revisions', args=[doc.slug], locale=doc.locale),\n **{'from': from_id, 'to': to_id}\n ))\n\n\[email protected]\ndef bugize_text(content):\n content = jinja2.escape(content)\n regex = re.compile(r'(bug)\\s+#?(\\d+)', re.IGNORECASE)\n content = regex.sub(\n jinja2.Markup('<a href=\"https://bugzilla.mozilla.org/'\n 'show_bug.cgi?id=\\\\2\" '\n 'target=\"_blank\" rel=\"noopener\">\\\\1 \\\\2</a>'),\n content)\n return content\n\n\[email protected]_function\ndef format_comment(rev, previous_revision=None, load_previous=True):\n \"\"\"\n Format comment for HTML display, with Bugzilla links and slug changes.\n\n Keyword Arguments:\n rev - The revision\n previous_revision - The previous revision (default None)\n load_previous - Try loading previous revision if None (default True)\n \"\"\"\n if previous_revision is None and load_previous:\n previous_revision = rev.previous\n comment = bugize_text(rev.comment if rev.comment else \"\")\n\n # If a page move, say so\n if previous_revision and previous_revision.slug != rev.slug:\n comment += jinja2.Markup(\n '<span class=\"slug-change\">'\n '<span>%s</span>'\n ' <i class=\"icon-long-arrow-right\" aria-hidden=\"true\"></i> '\n '<span>%s</span></span>') % (previous_revision.slug, rev.slug)\n\n return comment\n\n\[email protected]_function\ndef revisions_unified_diff(from_revision, to_revision):\n \"\"\"\n Given the two revisions generate a diff between their tidied\n content in the unified diff format.\n \"\"\"\n if from_revision is None or to_revision is None:\n return \"Diff is unavailable.\"\n\n fromfile = '[%s] #%s' % (from_revision.document.locale, from_revision.id)\n tofile = '[%s] #%s' % (to_revision.document.locale, to_revision.id)\n\n tidy_from = from_revision.get_tidied_content()\n tidy_to = to_revision.get_tidied_content()\n\n return u'\\n'.join(difflib.unified_diff(\n tidy_from.splitlines(),\n tidy_to.splitlines(),\n fromfile=fromfile,\n tofile=tofile,\n ))\n\n\[email protected]_function\ndef diff_table(content_from, content_to, prev_id, curr_id, tidy=False):\n \"\"\"\n Creates an HTML diff of the passed in content_from and content_to.\n \"\"\"\n if tidy:\n content_from, errors = tidy_content(content_from)\n content_to, errors = tidy_content(content_to)\n\n html_diff = difflib.HtmlDiff(wrapcolumn=DIFF_WRAP_COLUMN)\n try:\n diff = html_diff.make_table(content_from.splitlines(),\n content_to.splitlines(),\n ugettext('Revision %s') % prev_id,\n ugettext('Revision %s') % curr_id,\n context=True,\n numlines=config.DIFF_CONTEXT_LINES)\n except RuntimeError:\n # some diffs hit a max recursion error\n message = ugettext(u'There was an error 
generating the content.')\n diff = '<div class=\"warning\"><p>%s</p></div>' % message\n return jinja2.Markup(diff)\n\n\[email protected]_function\ndef tag_diff_table(prev_tags, curr_tags, prev_id, curr_id):\n html_diff = difflib.HtmlDiff(wrapcolumn=DIFF_WRAP_COLUMN)\n\n diff = html_diff.make_table([prev_tags], [curr_tags],\n ugettext('Revision %s') % prev_id,\n ugettext('Revision %s') % curr_id)\n\n # Simple formatting update: 784877\n diff = diff.replace('\",', '\"<br />').replace('<td', '<td valign=\"top\"')\n return jinja2.Markup(diff)\n\n\[email protected]_function\ndef colorize_diff(diff):\n # we're doing something horrible here because this will show up\n # in feed reader and other clients that don't load CSS files\n diff = diff.replace('<span class=\"diff_add\"', '<span class=\"diff_add\" '\n 'style=\"background-color: #afa; text-decoration: none;\"')\n diff = diff.replace('<span class=\"diff_sub\"', '<span class=\"diff_sub\" '\n 'style=\"background-color: #faa; text-decoration: none;\"')\n diff = diff.replace('<span class=\"diff_chg\"', '<span class=\"diff_chg\" '\n 'style=\"background-color: #fe0; text-decoration: none;\"')\n return diff\n\n\[email protected]\ndef wiki_bleach(val):\n return jinja2.Markup(clean_content(val))\n\n\[email protected]\ndef selector_content_find(document, selector):\n \"\"\"\n Provided a selector, returns the relevant content from the document\n \"\"\"\n content = ''\n try:\n page = pq(document.rendered_html)\n except ValueError:\n # pass errors during construction\n pass\n try:\n content = page.find(selector).text()\n except SelectorSyntaxError:\n # pass errors during find/select\n pass\n return content\n\n\ndef _recursive_escape(value, esc=conditional_escape):\n \"\"\"\n Recursively escapes strings in an object.\n\n Traverses dict, list and tuples. These are the data structures supported\n by the JSON encoder.\n \"\"\"\n if isinstance(value, dict):\n return type(value)((esc(k), _recursive_escape(v))\n for (k, v) in value.iteritems())\n elif isinstance(value, (list, tuple)):\n return type(value)(_recursive_escape(v) for v in value)\n elif isinstance(value, six.string_types):\n return esc(value)\n elif isinstance(value, (int, long, float)) or value in (True, False, None):\n return value\n # We've exhausted all the types acceptable by the default JSON encoder.\n # Django's improved JSON encoder handles a few other types, all of which\n # are represented by strings. 
For these types, we apply JSON encoding\n # immediately and then escape the result.\n return esc(DjangoJSONEncoder().default(value))\n\n\[email protected]\ndef tojson(value):\n \"\"\"\n Returns the JSON representation of the value.\n \"\"\"\n try:\n # If value contains custom subclasses of int, str, datetime, etc.\n # arbitrary exceptions may be raised during escaping or serialization.\n result = json.dumps(_recursive_escape(value), cls=DjangoJSONEncoder)\n except Exception:\n return ''\n return jinja2.Markup(result)\n\n\[email protected]\ndef absolutify(url, for_wiki_site=False):\n \"\"\"Joins settings.SITE_URL with a URL path.\"\"\"\n if url.startswith('http'):\n return url\n\n if for_wiki_site:\n site_url = settings.WIKI_SITE_URL\n else:\n site_url = settings.SITE_URL\n\n site = urlsplit(site_url)\n parts = urlsplit(url)\n scheme = site.scheme\n netloc = site.netloc\n path = parts.path\n query = parts.query\n fragment = parts.fragment\n\n if path == '':\n path = '/'\n\n return urlunparse([scheme, netloc, path, None, query, fragment])\n\n\[email protected]_function\ndef wiki_url(path):\n \"\"\"\n Create a URL pointing to Kuma.\n Look for a wiki page in the current locale, or default to given path\n \"\"\"\n if '#' in path:\n slug, fragment = path.split('#', 1)\n else:\n slug = path\n fragment = ''\n new_path = reverse('wiki.document', args=[slug])\n if fragment:\n new_path += '#' + fragment\n return new_path\n\n\[email protected]_function\n@lru_cache.lru_cache()\ndef include_svg(path, title=None, title_id=None):\n \"\"\"\n Embded an SVG file by path, optionally changing the title,\n and adding an id\n \"\"\"\n svg = loader.get_template(path).render()\n if (title):\n svg_parsed = pq(svg, namespaces={'svg': 'http://www.w3.org/2000/svg'})\n svg_parsed('svg|title')[0].text = title\n if (title_id):\n svg_parsed('svg|title').attr['id'] = title_id\n svg_out = svg_parsed.outerHtml()\n else:\n svg_out = svg\n return jinja2.Markup(svg_out)\n\n\[email protected]\ndef length_plus_one(lengthy):\n \"\"\"Useful when you want to do something like\n `{{ somelist|length_plus_one }}` and you want it to output the\n Python equivalent of `len(somelist) + 1`.\n \"\"\"\n return len(lengthy) + 1\n", "path": "kuma/wiki/templatetags/jinja_helpers.py" } ]
diff --git a/kuma/static/styles/components/wiki/crumbs.scss b/kuma/static/styles/components/wiki/crumbs.scss index 5281e2404b7..427a3d58fca 100644 --- a/kuma/static/styles/components/wiki/crumbs.scss +++ b/kuma/static/styles/components/wiki/crumbs.scss @@ -35,6 +35,13 @@ $crumb-vertical-spacing-desktop: $grid-spacing / 4; } } + a.crumb-current-page { + &:link, + &:visited { + color: $text-color; + } + } + span { display: inline-block; position: relative; diff --git a/kuma/wiki/jinja2/wiki/includes/document_macros.html b/kuma/wiki/jinja2/wiki/includes/document_macros.html index be21351c6a5..5d3d2916637 100644 --- a/kuma/wiki/jinja2/wiki/includes/document_macros.html +++ b/kuma/wiki/jinja2/wiki/includes/document_macros.html @@ -11,7 +11,10 @@ </li> {% endfor %} <li property="itemListElement" typeof="ListItem" class="crumb"> - <span property="name" aria-current="page">{{ document.title }}</span> + <a href="{{ document.get_absolute_url() }}" class="crumb-current-page" property="item" typeof="WebPage"> + <span property="name" aria-current="page">{{ document.title }}</span> + </a> + <meta property="position" content="{{ document.parents|length_plus_one }}"> </li> </ol> </nav> diff --git a/kuma/wiki/templatetags/jinja_helpers.py b/kuma/wiki/templatetags/jinja_helpers.py index 35075a29b98..ed5580e448c 100644 --- a/kuma/wiki/templatetags/jinja_helpers.py +++ b/kuma/wiki/templatetags/jinja_helpers.py @@ -262,3 +262,12 @@ def include_svg(path, title=None, title_id=None): else: svg_out = svg return jinja2.Markup(svg_out) + + [email protected] +def length_plus_one(lengthy): + """Useful when you want to do something like + `{{ somelist|length_plus_one }}` and you want it to output the + Python equivalent of `len(somelist) + 1`. + """ + return len(lengthy) + 1
cloudtools__troposphere-120
Update metadata to include property keys
Within the CloudFormation metadata object for Auto Scaling launch configurations, it'd be nice to be able to rely on the template's validators to sanity-check the allowed keys within the 'config' dictionary:

```
"Resources": {
    "MyInstance": {
        "Type": "AWS::EC2::Instance",
        "Metadata" : {
            "AWS::CloudFormation::Init" : {
                "config" : {
                    "packages" : { : },
                    "groups" : { : },
                    "users" : { : },
                    "sources" : { : },
                    "files" : { : },
                    "commands" : { : },
                    "services" : { : }
                }
            }
        },
        "Properties": {
            :
        }
    }
}
```

Currently the implementation of troposphere.cloudformation.InitConfig only supports a single key, 'files'.
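A hedged sketch of what the reporter would like to be able to write once `InitConfig` validates the full set of cfn-init sections rather than only `files`. The AMI ID, package, file, and service values below are placeholders chosen for illustration, not part of the issue.

```python
# Illustrative troposphere usage, assuming InitConfig accepts all cfn-init
# section keys (packages, groups, users, sources, files, commands, services).
from troposphere import Template
from troposphere.cloudformation import Init, InitConfig
from troposphere.ec2 import Instance

t = Template()
t.add_resource(Instance(
    "MyInstance",
    ImageId="ami-12345678",          # placeholder AMI
    InstanceType="t2.micro",
    Metadata=Init({
        "config": InitConfig(
            packages={"yum": {"httpd": []}},
            files={"/etc/motd": {"content": "hello", "mode": "000644"}},
            services={"sysvinit": {"httpd": {"enabled": "true",
                                             "ensureRunning": "true"}}},
        )
    }),
))
print(t.to_json())
```

With the pre-fix `InitConfig`, the `packages` and `services` keyword arguments above would be rejected by troposphere's property validation, since only `files` was declared in its props.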
[ { "content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'files': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py" } ]
[ { "content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'groups': (dict, False),\n 'users': (dict, False),\n 'sources': (dict, False),\n 'packages': (dict, False),\n 'files': (dict, False),\n 'commands': (dict, False),\n 'services': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py" } ]
diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py index c79ff3904..069496741 100644 --- a/troposphere/cloudformation.py +++ b/troposphere/cloudformation.py @@ -67,7 +67,13 @@ def JSONrepr(self): class InitConfig(AWSProperty): props = { - 'files': (dict, False) + 'groups': (dict, False), + 'users': (dict, False), + 'sources': (dict, False), + 'packages': (dict, False), + 'files': (dict, False), + 'commands': (dict, False), + 'services': (dict, False) }
mdn__kuma-6598
Possibility to prefill Reason textarea on $delete
If you're going to have some human-assisted automation that deletes the 20% or so of non-en-US documents that aren't actually translated, it would be nice not to have to type the same reason every time.

<img width="989" alt="Screen Shot 2020-02-26 at 11 56 40 AM" src="https://user-images.githubusercontent.com/26739/75367987-1be85500-588f-11ea-8ba1-f49e0db69cc7.png">

It would be neat if you could control it with something like `?reason=Sample%20reason`.
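A minimal sketch, not Kuma's actual view code, of the pattern the request implies: seed a Django form's initial value for the reason field from a query-string parameter such as `?reason=Sample%20reason`. `DeletionForm` is a hypothetical stand-in for the real deletion form, and the settings boilerplate exists only so the snippet runs outside a full project.

```python
# Prefill a "reason" textarea from the query string (illustrative only).
import django
from django.conf import settings

settings.configure(USE_I18N=False)   # minimal settings for standalone use
django.setup()

from django import forms


class DeletionForm(forms.Form):
    """Hypothetical stand-in for the wiki deletion form."""
    reason = forms.CharField(widget=forms.Textarea, required=False)


def prefilled_form(query_params):
    # In a real view, query_params would be request.GET from
    # a URL like .../$delete?reason=Sample%20reason
    return DeletionForm(initial={"reason": query_params.get("reason", "")})


form = prefilled_form({"reason": "Sample reason"})
print(form["reason"].value())        # -> Sample reason
```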
[ { "content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n login_required,\n permission_required,\n)\nfrom kuma.core.urlresolvers import reverse\n\nfrom ..decorators import check_readonly, process_document_path\nfrom ..forms import DocumentDeletionForm\nfrom ..models import Document, DocumentDeletionLog, Revision\nfrom ..utils import locale_and_slug_from_path\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@check_readonly\ndef revert_document(request, document_path, revision_id):\n \"\"\"\n Revert document to a specific revision.\n \"\"\"\n document_locale, document_slug, needs_redirect = locale_and_slug_from_path(\n document_path, request\n )\n\n revision = get_object_or_404(\n Revision.objects.select_related(\"document\"),\n pk=revision_id,\n document__slug=document_slug,\n )\n\n if request.method == \"GET\":\n # Render the confirmation page\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\"revision\": revision, \"document\": revision.document},\n )\n else:\n comment = request.POST.get(\"comment\")\n document = revision.document\n old_revision_pk = revision.pk\n try:\n new_revision = document.revert(revision, request.user, comment)\n # schedule a rendering of the new revision if it really was saved\n if new_revision.pk != old_revision_pk:\n document.schedule_rendering(\"max-age=0\")\n except IntegrityError:\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\n \"revision\": revision,\n \"document\": revision.document,\n \"error\": ugettext(\n \"Document already exists. 
Note: You cannot \"\n \"revert a document that has been moved until you \"\n \"delete its redirect.\"\n ),\n },\n )\n return redirect(\"wiki.document_revisions\", revision.document.slug)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.delete_document\")\n@check_readonly\n@process_document_path\ndef delete_document(request, document_slug, document_locale):\n \"\"\"\n Delete a Document.\n \"\"\"\n document = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n\n # HACK: https://bugzil.la/972545 - Don't delete pages that have children\n # TODO: https://bugzil.la/972541 - Deleting a page that has subpages\n prevent = document.children.exists()\n\n first_revision = document.revisions.all()[0]\n\n if request.method == \"POST\":\n form = DocumentDeletionForm(data=request.POST)\n if form.is_valid():\n DocumentDeletionLog.objects.create(\n locale=document.locale,\n slug=document.slug,\n user=request.user,\n reason=form.cleaned_data[\"reason\"],\n )\n document.delete()\n return redirect(document)\n else:\n form = DocumentDeletionForm()\n\n context = {\n \"document\": document,\n \"form\": form,\n \"request\": request,\n \"revision\": first_revision,\n \"prevent\": prevent,\n }\n return render(request, \"wiki/confirm_document_delete.html\", context)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.restore_document\")\n@check_readonly\n@process_document_path\ndef restore_document(request, document_slug, document_locale):\n \"\"\"\n Restore a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n document.restore()\n return redirect(document)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.purge_document\")\n@check_readonly\n@process_document_path\ndef purge_document(request, document_slug, document_locale):\n \"\"\"\n Permanently purge a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n deletion_log_entries = DocumentDeletionLog.objects.filter(\n locale=document_locale, slug=document_slug\n )\n if deletion_log_entries.exists():\n deletion_log = deletion_log_entries.order_by(\"-pk\")[0]\n else:\n deletion_log = {}\n\n if request.method == \"POST\" and \"confirm\" in request.POST:\n document.purge()\n return redirect(\n reverse(\"wiki.document\", args=(document_slug,), locale=document_locale)\n )\n else:\n return render(\n request,\n \"wiki/confirm_purge.html\",\n {\"document\": document, \"deletion_log\": deletion_log},\n )\n", "path": "kuma/wiki/views/delete.py" } ]
[ { "content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n login_required,\n permission_required,\n)\nfrom kuma.core.urlresolvers import reverse\n\nfrom ..decorators import check_readonly, process_document_path\nfrom ..forms import DocumentDeletionForm\nfrom ..models import Document, DocumentDeletionLog, Revision\nfrom ..utils import locale_and_slug_from_path\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@check_readonly\ndef revert_document(request, document_path, revision_id):\n \"\"\"\n Revert document to a specific revision.\n \"\"\"\n document_locale, document_slug, needs_redirect = locale_and_slug_from_path(\n document_path, request\n )\n\n revision = get_object_or_404(\n Revision.objects.select_related(\"document\"),\n pk=revision_id,\n document__slug=document_slug,\n )\n\n if request.method == \"GET\":\n # Render the confirmation page\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\"revision\": revision, \"document\": revision.document},\n )\n else:\n comment = request.POST.get(\"comment\")\n document = revision.document\n old_revision_pk = revision.pk\n try:\n new_revision = document.revert(revision, request.user, comment)\n # schedule a rendering of the new revision if it really was saved\n if new_revision.pk != old_revision_pk:\n document.schedule_rendering(\"max-age=0\")\n except IntegrityError:\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\n \"revision\": revision,\n \"document\": revision.document,\n \"error\": ugettext(\n \"Document already exists. 
Note: You cannot \"\n \"revert a document that has been moved until you \"\n \"delete its redirect.\"\n ),\n },\n )\n return redirect(\"wiki.document_revisions\", revision.document.slug)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.delete_document\")\n@check_readonly\n@process_document_path\ndef delete_document(request, document_slug, document_locale):\n \"\"\"\n Delete a Document.\n \"\"\"\n document = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n\n # HACK: https://bugzil.la/972545 - Don't delete pages that have children\n # TODO: https://bugzil.la/972541 - Deleting a page that has subpages\n prevent = document.children.exists()\n\n first_revision = document.revisions.all()[0]\n\n if request.method == \"POST\":\n form = DocumentDeletionForm(data=request.POST)\n if form.is_valid():\n DocumentDeletionLog.objects.create(\n locale=document.locale,\n slug=document.slug,\n user=request.user,\n reason=form.cleaned_data[\"reason\"],\n )\n document.delete()\n return redirect(document)\n else:\n\n form = DocumentDeletionForm(initial={\"reason\": request.GET.get(\"reason\", \"\")})\n\n context = {\n \"document\": document,\n \"form\": form,\n \"request\": request,\n \"revision\": first_revision,\n \"prevent\": prevent,\n }\n return render(request, \"wiki/confirm_document_delete.html\", context)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.restore_document\")\n@check_readonly\n@process_document_path\ndef restore_document(request, document_slug, document_locale):\n \"\"\"\n Restore a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n document.restore()\n return redirect(document)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.purge_document\")\n@check_readonly\n@process_document_path\ndef purge_document(request, document_slug, document_locale):\n \"\"\"\n Permanently purge a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n deletion_log_entries = DocumentDeletionLog.objects.filter(\n locale=document_locale, slug=document_slug\n )\n if deletion_log_entries.exists():\n deletion_log = deletion_log_entries.order_by(\"-pk\")[0]\n else:\n deletion_log = {}\n\n if request.method == \"POST\" and \"confirm\" in request.POST:\n document.purge()\n return redirect(\n reverse(\"wiki.document\", args=(document_slug,), locale=document_locale)\n )\n else:\n return render(\n request,\n \"wiki/confirm_purge.html\",\n {\"document\": document, \"deletion_log\": deletion_log},\n )\n", "path": "kuma/wiki/views/delete.py" } ]
diff --git a/kuma/wiki/views/delete.py b/kuma/wiki/views/delete.py index cd80d192a5a..23ee6197d2c 100644 --- a/kuma/wiki/views/delete.py +++ b/kuma/wiki/views/delete.py @@ -100,7 +100,8 @@ def delete_document(request, document_slug, document_locale): document.delete() return redirect(document) else: - form = DocumentDeletionForm() + + form = DocumentDeletionForm(initial={"reason": request.GET.get("reason", "")}) context = { "document": document,
tornadoweb__tornado-2629
When HTTPResponse body is empty it raises a ValueError when it should not
In commit https://github.com/tornadoweb/tornado/pull/2514/commits/7b846ea56bff1892a4d4d05206210b4d234e292b the code for httpclient.HTTPResponse.body was changed to raise a ValueError when the body is empty. But since the message body is optional, raising a ValueError does not seem right, because an empty body is not an error. Can this be reverted to the Tornado 5 behavior of simply returning None when the body is empty?
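For context, a small self-contained sketch of the behavior the reporter asks to restore: an empty body yields None instead of an exception. `DemoResponse` is an illustrative stand-in, not tornado's actual `HTTPResponse`.

```python
# Stand-in class showing the "return None for an empty body" behavior
# (the Tornado 5.x style the issue asks to bring back).
from io import BytesIO
from typing import Optional


class DemoResponse:
    def __init__(self, buffer: Optional[BytesIO] = None) -> None:
        self.buffer = buffer
        self._body: Optional[bytes] = None

    @property
    def body(self) -> Optional[bytes]:
        if self.buffer is None:
            return None              # empty body is not an error
        if self._body is None:
            self._body = self.buffer.getvalue()
        return self._body


print(DemoResponse().body)                   # None, no ValueError raised
print(DemoResponse(BytesIO(b"hello")).body)  # b'hello'
```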
[ { "content": "\"\"\"Blocking and non-blocking HTTP client interfaces.\n\nThis module defines a common interface shared by two implementations,\n``simple_httpclient`` and ``curl_httpclient``. Applications may either\ninstantiate their chosen implementation class directly or use the\n`AsyncHTTPClient` class from this module, which selects an implementation\nthat can be overridden with the `AsyncHTTPClient.configure` method.\n\nThe default implementation is ``simple_httpclient``, and this is expected\nto be suitable for most users' needs. However, some applications may wish\nto switch to ``curl_httpclient`` for reasons such as the following:\n\n* ``curl_httpclient`` has some features not found in ``simple_httpclient``,\n including support for HTTP proxies and the ability to use a specified\n network interface.\n\n* ``curl_httpclient`` is more likely to be compatible with sites that are\n not-quite-compliant with the HTTP spec, or sites that use little-exercised\n features of HTTP.\n\n* ``curl_httpclient`` is faster.\n\nNote that if you are using ``curl_httpclient``, it is highly\nrecommended that you use a recent version of ``libcurl`` and\n``pycurl``. Currently the minimum supported version of libcurl is\n7.22.0, and the minimum version of pycurl is 7.18.2. It is highly\nrecommended that your ``libcurl`` installation is built with\nasynchronous DNS resolver (threaded or c-ares), otherwise you may\nencounter various problems with request timeouts (for more\ninformation, see\nhttp://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS\nand comments in curl_httpclient.py).\n\nTo select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::\n\n AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\"\"\"\n\nimport datetime\nimport functools\nfrom io import BytesIO\nimport ssl\nimport time\nimport weakref\n\nfrom tornado.concurrent import (\n Future,\n future_set_result_unless_cancelled,\n future_set_exception_unless_cancelled,\n)\nfrom tornado.escape import utf8, native_str\nfrom tornado import gen, httputil\nfrom tornado.ioloop import IOLoop\nfrom tornado.util import Configurable\n\nfrom typing import Type, Any, Union, Dict, Callable, Optional, cast, Awaitable\n\n\nclass HTTPClient(object):\n \"\"\"A blocking HTTP client.\n\n This interface is provided to make it easier to share code between\n synchronous and asynchronous applications. Applications that are\n running an `.IOLoop` must use `AsyncHTTPClient` instead.\n\n Typical usage looks like this::\n\n http_client = httpclient.HTTPClient()\n try:\n response = http_client.fetch(\"http://www.google.com/\")\n print(response.body)\n except httpclient.HTTPError as e:\n # HTTPError is raised for non-200 responses; the response\n # can be found in e.response.\n print(\"Error: \" + str(e))\n except Exception as e:\n # Other errors are possible, such as IOError.\n print(\"Error: \" + str(e))\n http_client.close()\n\n .. 
versionchanged:: 5.0\n\n Due to limitations in `asyncio`, it is no longer possible to\n use the synchronous ``HTTPClient`` while an `.IOLoop` is running.\n Use `AsyncHTTPClient` instead.\n\n \"\"\"\n\n def __init__(\n self, async_client_class: Type[\"AsyncHTTPClient\"] = None, **kwargs: Any\n ) -> None:\n # Initialize self._closed at the beginning of the constructor\n # so that an exception raised here doesn't lead to confusing\n # failures in __del__.\n self._closed = True\n self._io_loop = IOLoop(make_current=False)\n if async_client_class is None:\n async_client_class = AsyncHTTPClient\n\n # Create the client while our IOLoop is \"current\", without\n # clobbering the thread's real current IOLoop (if any).\n async def make_client() -> \"AsyncHTTPClient\":\n await gen.sleep(0)\n assert async_client_class is not None\n return async_client_class(**kwargs)\n\n self._async_client = self._io_loop.run_sync(make_client)\n self._closed = False\n\n def __del__(self) -> None:\n self.close()\n\n def close(self) -> None:\n \"\"\"Closes the HTTPClient, freeing any resources used.\"\"\"\n if not self._closed:\n self._async_client.close()\n self._io_loop.close()\n self._closed = True\n\n def fetch(\n self, request: Union[\"HTTPRequest\", str], **kwargs: Any\n ) -> \"HTTPResponse\":\n \"\"\"Executes a request, returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n If an error occurs during the fetch, we raise an `HTTPError` unless\n the ``raise_error`` keyword argument is set to False.\n \"\"\"\n response = self._io_loop.run_sync(\n functools.partial(self._async_client.fetch, request, **kwargs)\n )\n return response\n\n\nclass AsyncHTTPClient(Configurable):\n \"\"\"An non-blocking HTTP client.\n\n Example usage::\n\n async def f():\n http_client = AsyncHTTPClient()\n try:\n response = await http_client.fetch(\"http://www.google.com\")\n except Exception as e:\n print(\"Error: %s\" % e)\n else:\n print(response.body)\n\n The constructor for this class is magic in several respects: It\n actually creates an instance of an implementation-specific\n subclass, and instances are reused as a kind of pseudo-singleton\n (one per `.IOLoop`). The keyword argument ``force_instance=True``\n can be used to suppress this singleton behavior. Unless\n ``force_instance=True`` is used, no arguments should be passed to\n the `AsyncHTTPClient` constructor. The implementation subclass as\n well as arguments to its constructor can be set with the static\n method `configure()`\n\n All `AsyncHTTPClient` implementations support a ``defaults``\n keyword argument, which can be used to set default values for\n `HTTPRequest` attributes. For example::\n\n AsyncHTTPClient.configure(\n None, defaults=dict(user_agent=\"MyUserAgent\"))\n # or with force_instance:\n client = AsyncHTTPClient(force_instance=True,\n defaults=dict(user_agent=\"MyUserAgent\"))\n\n .. 
versionchanged:: 5.0\n The ``io_loop`` argument (deprecated since version 4.1) has been removed.\n\n \"\"\"\n\n _instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]\n\n @classmethod\n def configurable_base(cls) -> Type[Configurable]:\n return AsyncHTTPClient\n\n @classmethod\n def configurable_default(cls) -> Type[Configurable]:\n from tornado.simple_httpclient import SimpleAsyncHTTPClient\n\n return SimpleAsyncHTTPClient\n\n @classmethod\n def _async_clients(cls) -> Dict[IOLoop, \"AsyncHTTPClient\"]:\n attr_name = \"_async_client_dict_\" + cls.__name__\n if not hasattr(cls, attr_name):\n setattr(cls, attr_name, weakref.WeakKeyDictionary())\n return getattr(cls, attr_name)\n\n def __new__(cls, force_instance: bool = False, **kwargs: Any) -> \"AsyncHTTPClient\":\n io_loop = IOLoop.current()\n if force_instance:\n instance_cache = None\n else:\n instance_cache = cls._async_clients()\n if instance_cache is not None and io_loop in instance_cache:\n return instance_cache[io_loop]\n instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore\n # Make sure the instance knows which cache to remove itself from.\n # It can't simply call _async_clients() because we may be in\n # __new__(AsyncHTTPClient) but instance.__class__ may be\n # SimpleAsyncHTTPClient.\n instance._instance_cache = instance_cache\n if instance_cache is not None:\n instance_cache[instance.io_loop] = instance\n return instance\n\n def initialize(self, defaults: Dict[str, Any] = None) -> None:\n self.io_loop = IOLoop.current()\n self.defaults = dict(HTTPRequest._DEFAULTS)\n if defaults is not None:\n self.defaults.update(defaults)\n self._closed = False\n\n def close(self) -> None:\n \"\"\"Destroys this HTTP client, freeing any file descriptors used.\n\n This method is **not needed in normal use** due to the way\n that `AsyncHTTPClient` objects are transparently reused.\n ``close()`` is generally only necessary when either the\n `.IOLoop` is also being closed, or the ``force_instance=True``\n argument was used when creating the `AsyncHTTPClient`.\n\n No other methods may be called on the `AsyncHTTPClient` after\n ``close()``.\n\n \"\"\"\n if self._closed:\n return\n self._closed = True\n if self._instance_cache is not None:\n cached_val = self._instance_cache.pop(self.io_loop, None)\n # If there's an object other than self in the instance\n # cache for our IOLoop, something has gotten mixed up. A\n # value of None appears to be possible when this is called\n # from a destructor (HTTPClient.__del__) as the weakref\n # gets cleared before the destructor runs.\n if cached_val is not None and cached_val is not self:\n raise RuntimeError(\"inconsistent AsyncHTTPClient cache\")\n\n def fetch(\n self,\n request: Union[str, \"HTTPRequest\"],\n raise_error: bool = True,\n **kwargs: Any\n ) -> Awaitable[\"HTTPResponse\"]:\n \"\"\"Executes a request, asynchronously returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n This method returns a `.Future` whose result is an\n `HTTPResponse`. By default, the ``Future`` will raise an\n `HTTPError` if the request returned a non-200 response code\n (other errors may also be raised if the server could not be\n contacted). 
Instead, if ``raise_error`` is set to False, the\n response will always be returned regardless of the response\n code.\n\n If a ``callback`` is given, it will be invoked with the `HTTPResponse`.\n In the callback interface, `HTTPError` is not automatically raised.\n Instead, you must check the response's ``error`` attribute or\n call its `~HTTPResponse.rethrow` method.\n\n .. versionchanged:: 6.0\n\n The ``callback`` argument was removed. Use the returned\n `.Future` instead.\n\n The ``raise_error=False`` argument only affects the\n `HTTPError` raised when a non-200 response code is used,\n instead of suppressing all errors.\n \"\"\"\n if self._closed:\n raise RuntimeError(\"fetch() called on closed AsyncHTTPClient\")\n if not isinstance(request, HTTPRequest):\n request = HTTPRequest(url=request, **kwargs)\n else:\n if kwargs:\n raise ValueError(\n \"kwargs can't be used if request is an HTTPRequest object\"\n )\n # We may modify this (to add Host, Accept-Encoding, etc),\n # so make sure we don't modify the caller's object. This is also\n # where normal dicts get converted to HTTPHeaders objects.\n request.headers = httputil.HTTPHeaders(request.headers)\n request_proxy = _RequestProxy(request, self.defaults)\n future = Future() # type: Future[HTTPResponse]\n\n def handle_response(response: \"HTTPResponse\") -> None:\n if response.error:\n if raise_error or not response._error_is_response_code:\n future_set_exception_unless_cancelled(future, response.error)\n return\n future_set_result_unless_cancelled(future, response)\n\n self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)\n return future\n\n def fetch_impl(\n self, request: \"HTTPRequest\", callback: Callable[[\"HTTPResponse\"], None]\n ) -> None:\n raise NotImplementedError()\n\n @classmethod\n def configure(\n cls, impl: \"Union[None, str, Type[Configurable]]\", **kwargs: Any\n ) -> None:\n \"\"\"Configures the `AsyncHTTPClient` subclass to use.\n\n ``AsyncHTTPClient()`` actually creates an instance of a subclass.\n This method may be called with either a class object or the\n fully-qualified name of such a class (or ``None`` to use the default,\n ``SimpleAsyncHTTPClient``)\n\n If additional keyword arguments are given, they will be passed\n to the constructor of each subclass instance created. The\n keyword argument ``max_clients`` determines the maximum number\n of simultaneous `~AsyncHTTPClient.fetch()` operations that can\n execute in parallel on each `.IOLoop`. 
Additional arguments\n may be supported depending on the implementation class in use.\n\n Example::\n\n AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n \"\"\"\n super(AsyncHTTPClient, cls).configure(impl, **kwargs)\n\n\nclass HTTPRequest(object):\n \"\"\"HTTP client request object.\"\"\"\n\n _headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]\n\n # Default values for HTTPRequest parameters.\n # Merged with the values on the request object by AsyncHTTPClient\n # implementations.\n _DEFAULTS = dict(\n connect_timeout=20.0,\n request_timeout=20.0,\n follow_redirects=True,\n max_redirects=5,\n decompress_response=True,\n proxy_password=\"\",\n allow_nonstandard_methods=False,\n validate_cert=True,\n )\n\n def __init__(\n self,\n url: str,\n method: str = \"GET\",\n headers: Union[Dict[str, str], httputil.HTTPHeaders] = None,\n body: Union[bytes, str] = None,\n auth_username: str = None,\n auth_password: str = None,\n auth_mode: str = None,\n connect_timeout: float = None,\n request_timeout: float = None,\n if_modified_since: Union[float, datetime.datetime] = None,\n follow_redirects: bool = None,\n max_redirects: int = None,\n user_agent: str = None,\n use_gzip: bool = None,\n network_interface: str = None,\n streaming_callback: Callable[[bytes], None] = None,\n header_callback: Callable[[str], None] = None,\n prepare_curl_callback: Callable[[Any], None] = None,\n proxy_host: str = None,\n proxy_port: int = None,\n proxy_username: str = None,\n proxy_password: str = None,\n proxy_auth_mode: str = None,\n allow_nonstandard_methods: bool = None,\n validate_cert: bool = None,\n ca_certs: str = None,\n allow_ipv6: bool = None,\n client_key: str = None,\n client_cert: str = None,\n body_producer: Callable[[Callable[[bytes], None]], \"Future[None]\"] = None,\n expect_100_continue: bool = False,\n decompress_response: bool = None,\n ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,\n ) -> None:\n r\"\"\"All parameters except ``url`` are optional.\n\n :arg str url: URL to fetch\n :arg str method: HTTP method, e.g. \"GET\" or \"POST\"\n :arg headers: Additional HTTP headers to pass on the request\n :type headers: `~tornado.httputil.HTTPHeaders` or `dict`\n :arg body: HTTP request body as a string (byte or unicode; if unicode\n the utf-8 encoding will be used)\n :arg body_producer: Callable used for lazy/asynchronous request bodies.\n It is called with one argument, a ``write`` function, and should\n return a `.Future`. It should call the write function with new\n data as it becomes available. The write function returns a\n `.Future` which can be used for flow control.\n Only one of ``body`` and ``body_producer`` may\n be specified. ``body_producer`` is not supported on\n ``curl_httpclient``. When using ``body_producer`` it is recommended\n to pass a ``Content-Length`` in the headers as otherwise chunked\n encoding will be used, and many servers do not support chunked\n encoding on requests. 
New in Tornado 4.0\n :arg str auth_username: Username for HTTP authentication\n :arg str auth_password: Password for HTTP authentication\n :arg str auth_mode: Authentication mode; default is \"basic\".\n Allowed values are implementation-defined; ``curl_httpclient``\n supports \"basic\" and \"digest\"; ``simple_httpclient`` only supports\n \"basic\"\n :arg float connect_timeout: Timeout for initial connection in seconds,\n default 20 seconds\n :arg float request_timeout: Timeout for entire request in seconds,\n default 20 seconds\n :arg if_modified_since: Timestamp for ``If-Modified-Since`` header\n :type if_modified_since: `datetime` or `float`\n :arg bool follow_redirects: Should redirects be followed automatically\n or return the 3xx response? Default True.\n :arg int max_redirects: Limit for ``follow_redirects``, default 5.\n :arg str user_agent: String to send as ``User-Agent`` header\n :arg bool decompress_response: Request a compressed response from\n the server and decompress it after downloading. Default is True.\n New in Tornado 4.0.\n :arg bool use_gzip: Deprecated alias for ``decompress_response``\n since Tornado 4.0.\n :arg str network_interface: Network interface or source IP to use for request.\n See ``curl_httpclient`` note below.\n :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will\n be run with each chunk of data as it is received, and\n ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in\n the final response.\n :arg collections.abc.Callable header_callback: If set, ``header_callback`` will\n be run with each header line as it is received (including the\n first line, e.g. ``HTTP/1.0 200 OK\\r\\n``, and a final line\n containing only ``\\r\\n``. All lines include the trailing newline\n characters). ``HTTPResponse.headers`` will be empty in the final\n response. This is most useful in conjunction with\n ``streaming_callback``, because it's the only way to get access to\n header data while the request is in progress.\n :arg collections.abc.Callable prepare_curl_callback: If set, will be called with\n a ``pycurl.Curl`` object to allow the application to make additional\n ``setopt`` calls.\n :arg str proxy_host: HTTP proxy hostname. To use proxies,\n ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,\n ``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are\n currently only supported with ``curl_httpclient``.\n :arg int proxy_port: HTTP proxy port\n :arg str proxy_username: HTTP proxy username\n :arg str proxy_password: HTTP proxy password\n :arg str proxy_auth_mode: HTTP proxy Authentication mode;\n default is \"basic\". supports \"basic\" and \"digest\"\n :arg bool allow_nonstandard_methods: Allow unknown values for ``method``\n argument? Default is False.\n :arg bool validate_cert: For HTTPS requests, validate the server's\n certificate? Default is True.\n :arg str ca_certs: filename of CA certificates in PEM format,\n or None to use defaults. See note below when used with\n ``curl_httpclient``.\n :arg str client_key: Filename for client SSL key, if any. See\n note below when used with ``curl_httpclient``.\n :arg str client_cert: Filename for client SSL certificate, if any.\n See note below when used with ``curl_httpclient``.\n :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in\n ``simple_httpclient`` (unsupported by ``curl_httpclient``).\n Overrides ``validate_cert``, ``ca_certs``, ``client_key``,\n and ``client_cert``.\n :arg bool allow_ipv6: Use IPv6 when available? 
Default is True.\n :arg bool expect_100_continue: If true, send the\n ``Expect: 100-continue`` header and wait for a continue response\n before sending the request body. Only supported with\n ``simple_httpclient``.\n\n .. note::\n\n When using ``curl_httpclient`` certain options may be\n inherited by subsequent fetches because ``pycurl`` does\n not allow them to be cleanly reset. This applies to the\n ``ca_certs``, ``client_key``, ``client_cert``, and\n ``network_interface`` arguments. If you use these\n options, you should pass them on every request (you don't\n have to always use the same values, but it's not possible\n to mix requests that specify these options with ones that\n use the defaults).\n\n .. versionadded:: 3.1\n The ``auth_mode`` argument.\n\n .. versionadded:: 4.0\n The ``body_producer`` and ``expect_100_continue`` arguments.\n\n .. versionadded:: 4.2\n The ``ssl_options`` argument.\n\n .. versionadded:: 4.5\n The ``proxy_auth_mode`` argument.\n \"\"\"\n # Note that some of these attributes go through property setters\n # defined below.\n self.headers = headers\n if if_modified_since:\n self.headers[\"If-Modified-Since\"] = httputil.format_timestamp(\n if_modified_since\n )\n self.proxy_host = proxy_host\n self.proxy_port = proxy_port\n self.proxy_username = proxy_username\n self.proxy_password = proxy_password\n self.proxy_auth_mode = proxy_auth_mode\n self.url = url\n self.method = method\n self.body = body\n self.body_producer = body_producer\n self.auth_username = auth_username\n self.auth_password = auth_password\n self.auth_mode = auth_mode\n self.connect_timeout = connect_timeout\n self.request_timeout = request_timeout\n self.follow_redirects = follow_redirects\n self.max_redirects = max_redirects\n self.user_agent = user_agent\n if decompress_response is not None:\n self.decompress_response = decompress_response # type: Optional[bool]\n else:\n self.decompress_response = use_gzip\n self.network_interface = network_interface\n self.streaming_callback = streaming_callback\n self.header_callback = header_callback\n self.prepare_curl_callback = prepare_curl_callback\n self.allow_nonstandard_methods = allow_nonstandard_methods\n self.validate_cert = validate_cert\n self.ca_certs = ca_certs\n self.allow_ipv6 = allow_ipv6\n self.client_key = client_key\n self.client_cert = client_cert\n self.ssl_options = ssl_options\n self.expect_100_continue = expect_100_continue\n self.start_time = time.time()\n\n @property\n def headers(self) -> httputil.HTTPHeaders:\n # TODO: headers may actually be a plain dict until fairly late in\n # the process (AsyncHTTPClient.fetch), but practically speaking,\n # whenever the property is used they're already HTTPHeaders.\n return self._headers # type: ignore\n\n @headers.setter\n def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:\n if value is None:\n self._headers = httputil.HTTPHeaders()\n else:\n self._headers = value # type: ignore\n\n @property\n def body(self) -> bytes:\n return self._body\n\n @body.setter\n def body(self, value: Union[bytes, str]) -> None:\n self._body = utf8(value)\n\n\nclass HTTPResponse(object):\n \"\"\"HTTP Response object.\n\n Attributes:\n\n * ``request``: HTTPRequest object\n\n * ``code``: numeric HTTP status code, e.g. 
200 or 404\n\n * ``reason``: human-readable reason phrase describing the status code\n\n * ``headers``: `tornado.httputil.HTTPHeaders` object\n\n * ``effective_url``: final location of the resource after following any\n redirects\n\n * ``buffer``: ``cStringIO`` object for response body\n\n * ``body``: response body as bytes (created on demand from ``self.buffer``)\n\n * ``error``: Exception object, if any\n\n * ``request_time``: seconds from request start to finish. Includes all\n network operations from DNS resolution to receiving the last byte of\n data. Does not include time spent in the queue (due to the\n ``max_clients`` option). If redirects were followed, only includes\n the final request.\n\n * ``start_time``: Time at which the HTTP operation started, based on\n `time.time` (not the monotonic clock used by `.IOLoop.time`). May\n be ``None`` if the request timed out while in the queue.\n\n * ``time_info``: dictionary of diagnostic timing information from the\n request. Available data are subject to change, but currently uses timings\n available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,\n plus ``queue``, which is the delay (if any) introduced by waiting for\n a slot under `AsyncHTTPClient`'s ``max_clients`` setting.\n\n .. versionadded:: 5.1\n\n Added the ``start_time`` attribute.\n\n .. versionchanged:: 5.1\n\n The ``request_time`` attribute previously included time spent in the queue\n for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time\n is excluded in both implementations. ``request_time`` is now more accurate for\n ``curl_httpclient`` because it uses a monotonic clock when available.\n \"\"\"\n\n # I'm not sure why these don't get type-inferred from the references in __init__.\n error = None # type: Optional[BaseException]\n _error_is_response_code = False\n request = None # type: HTTPRequest\n\n def __init__(\n self,\n request: HTTPRequest,\n code: int,\n headers: httputil.HTTPHeaders = None,\n buffer: BytesIO = None,\n effective_url: str = None,\n error: BaseException = None,\n request_time: float = None,\n time_info: Dict[str, float] = None,\n reason: str = None,\n start_time: float = None,\n ) -> None:\n if isinstance(request, _RequestProxy):\n self.request = request.request\n else:\n self.request = request\n self.code = code\n self.reason = reason or httputil.responses.get(code, \"Unknown\")\n if headers is not None:\n self.headers = headers\n else:\n self.headers = httputil.HTTPHeaders()\n self.buffer = buffer\n self._body = None # type: Optional[bytes]\n if effective_url is None:\n self.effective_url = request.url\n else:\n self.effective_url = effective_url\n self._error_is_response_code = False\n if error is None:\n if self.code < 200 or self.code >= 300:\n self._error_is_response_code = True\n self.error = HTTPError(self.code, message=self.reason, response=self)\n else:\n self.error = None\n else:\n self.error = error\n self.start_time = start_time\n self.request_time = request_time\n self.time_info = time_info or {}\n\n @property\n def body(self) -> bytes:\n if self.buffer is None:\n raise ValueError(\"body not set\")\n elif self._body is None:\n self._body = self.buffer.getvalue()\n\n return self._body\n\n def rethrow(self) -> None:\n \"\"\"If there was an error on the request, raise an `HTTPError`.\"\"\"\n if self.error:\n raise self.error\n\n def __repr__(self) -> str:\n args = \",\".join(\"%s=%r\" % i for i in sorted(self.__dict__.items()))\n return \"%s(%s)\" % (self.__class__.__name__, args)\n\n\nclass 
HTTPClientError(Exception):\n \"\"\"Exception thrown for an unsuccessful HTTP request.\n\n Attributes:\n\n * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is\n used when no HTTP response was received, e.g. for a timeout.\n\n * ``response`` - `HTTPResponse` object, if any.\n\n Note that if ``follow_redirects`` is False, redirects become HTTPErrors,\n and you can look at ``error.response.headers['Location']`` to see the\n destination of the redirect.\n\n .. versionchanged:: 5.1\n\n Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with\n `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains\n as an alias.\n \"\"\"\n\n def __init__(\n self, code: int, message: str = None, response: HTTPResponse = None\n ) -> None:\n self.code = code\n self.message = message or httputil.responses.get(code, \"Unknown\")\n self.response = response\n super(HTTPClientError, self).__init__(code, message, response)\n\n def __str__(self) -> str:\n return \"HTTP %d: %s\" % (self.code, self.message)\n\n # There is a cyclic reference between self and self.response,\n # which breaks the default __repr__ implementation.\n # (especially on pypy, which doesn't have the same recursion\n # detection as cpython).\n __repr__ = __str__\n\n\nHTTPError = HTTPClientError\n\n\nclass _RequestProxy(object):\n \"\"\"Combines an object with a dictionary of defaults.\n\n Used internally by AsyncHTTPClient implementations.\n \"\"\"\n\n def __init__(\n self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]\n ) -> None:\n self.request = request\n self.defaults = defaults\n\n def __getattr__(self, name: str) -> Any:\n request_attr = getattr(self.request, name)\n if request_attr is not None:\n return request_attr\n elif self.defaults is not None:\n return self.defaults.get(name, None)\n else:\n return None\n\n\ndef main() -> None:\n from tornado.options import define, options, parse_command_line\n\n define(\"print_headers\", type=bool, default=False)\n define(\"print_body\", type=bool, default=True)\n define(\"follow_redirects\", type=bool, default=True)\n define(\"validate_cert\", type=bool, default=True)\n define(\"proxy_host\", type=str)\n define(\"proxy_port\", type=int)\n args = parse_command_line()\n client = HTTPClient()\n for arg in args:\n try:\n response = client.fetch(\n arg,\n follow_redirects=options.follow_redirects,\n validate_cert=options.validate_cert,\n proxy_host=options.proxy_host,\n proxy_port=options.proxy_port,\n )\n except HTTPError as e:\n if e.response is not None:\n response = e.response\n else:\n raise\n if options.print_headers:\n print(response.headers)\n if options.print_body:\n print(native_str(response.body))\n client.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tornado/httpclient.py" } ]
[ { "content": "\"\"\"Blocking and non-blocking HTTP client interfaces.\n\nThis module defines a common interface shared by two implementations,\n``simple_httpclient`` and ``curl_httpclient``. Applications may either\ninstantiate their chosen implementation class directly or use the\n`AsyncHTTPClient` class from this module, which selects an implementation\nthat can be overridden with the `AsyncHTTPClient.configure` method.\n\nThe default implementation is ``simple_httpclient``, and this is expected\nto be suitable for most users' needs. However, some applications may wish\nto switch to ``curl_httpclient`` for reasons such as the following:\n\n* ``curl_httpclient`` has some features not found in ``simple_httpclient``,\n including support for HTTP proxies and the ability to use a specified\n network interface.\n\n* ``curl_httpclient`` is more likely to be compatible with sites that are\n not-quite-compliant with the HTTP spec, or sites that use little-exercised\n features of HTTP.\n\n* ``curl_httpclient`` is faster.\n\nNote that if you are using ``curl_httpclient``, it is highly\nrecommended that you use a recent version of ``libcurl`` and\n``pycurl``. Currently the minimum supported version of libcurl is\n7.22.0, and the minimum version of pycurl is 7.18.2. It is highly\nrecommended that your ``libcurl`` installation is built with\nasynchronous DNS resolver (threaded or c-ares), otherwise you may\nencounter various problems with request timeouts (for more\ninformation, see\nhttp://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS\nand comments in curl_httpclient.py).\n\nTo select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::\n\n AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\"\"\"\n\nimport datetime\nimport functools\nfrom io import BytesIO\nimport ssl\nimport time\nimport weakref\n\nfrom tornado.concurrent import (\n Future,\n future_set_result_unless_cancelled,\n future_set_exception_unless_cancelled,\n)\nfrom tornado.escape import utf8, native_str\nfrom tornado import gen, httputil\nfrom tornado.ioloop import IOLoop\nfrom tornado.util import Configurable\n\nfrom typing import Type, Any, Union, Dict, Callable, Optional, cast, Awaitable\n\n\nclass HTTPClient(object):\n \"\"\"A blocking HTTP client.\n\n This interface is provided to make it easier to share code between\n synchronous and asynchronous applications. Applications that are\n running an `.IOLoop` must use `AsyncHTTPClient` instead.\n\n Typical usage looks like this::\n\n http_client = httpclient.HTTPClient()\n try:\n response = http_client.fetch(\"http://www.google.com/\")\n print(response.body)\n except httpclient.HTTPError as e:\n # HTTPError is raised for non-200 responses; the response\n # can be found in e.response.\n print(\"Error: \" + str(e))\n except Exception as e:\n # Other errors are possible, such as IOError.\n print(\"Error: \" + str(e))\n http_client.close()\n\n .. 
versionchanged:: 5.0\n\n Due to limitations in `asyncio`, it is no longer possible to\n use the synchronous ``HTTPClient`` while an `.IOLoop` is running.\n Use `AsyncHTTPClient` instead.\n\n \"\"\"\n\n def __init__(\n self, async_client_class: Type[\"AsyncHTTPClient\"] = None, **kwargs: Any\n ) -> None:\n # Initialize self._closed at the beginning of the constructor\n # so that an exception raised here doesn't lead to confusing\n # failures in __del__.\n self._closed = True\n self._io_loop = IOLoop(make_current=False)\n if async_client_class is None:\n async_client_class = AsyncHTTPClient\n\n # Create the client while our IOLoop is \"current\", without\n # clobbering the thread's real current IOLoop (if any).\n async def make_client() -> \"AsyncHTTPClient\":\n await gen.sleep(0)\n assert async_client_class is not None\n return async_client_class(**kwargs)\n\n self._async_client = self._io_loop.run_sync(make_client)\n self._closed = False\n\n def __del__(self) -> None:\n self.close()\n\n def close(self) -> None:\n \"\"\"Closes the HTTPClient, freeing any resources used.\"\"\"\n if not self._closed:\n self._async_client.close()\n self._io_loop.close()\n self._closed = True\n\n def fetch(\n self, request: Union[\"HTTPRequest\", str], **kwargs: Any\n ) -> \"HTTPResponse\":\n \"\"\"Executes a request, returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n If an error occurs during the fetch, we raise an `HTTPError` unless\n the ``raise_error`` keyword argument is set to False.\n \"\"\"\n response = self._io_loop.run_sync(\n functools.partial(self._async_client.fetch, request, **kwargs)\n )\n return response\n\n\nclass AsyncHTTPClient(Configurable):\n \"\"\"An non-blocking HTTP client.\n\n Example usage::\n\n async def f():\n http_client = AsyncHTTPClient()\n try:\n response = await http_client.fetch(\"http://www.google.com\")\n except Exception as e:\n print(\"Error: %s\" % e)\n else:\n print(response.body)\n\n The constructor for this class is magic in several respects: It\n actually creates an instance of an implementation-specific\n subclass, and instances are reused as a kind of pseudo-singleton\n (one per `.IOLoop`). The keyword argument ``force_instance=True``\n can be used to suppress this singleton behavior. Unless\n ``force_instance=True`` is used, no arguments should be passed to\n the `AsyncHTTPClient` constructor. The implementation subclass as\n well as arguments to its constructor can be set with the static\n method `configure()`\n\n All `AsyncHTTPClient` implementations support a ``defaults``\n keyword argument, which can be used to set default values for\n `HTTPRequest` attributes. For example::\n\n AsyncHTTPClient.configure(\n None, defaults=dict(user_agent=\"MyUserAgent\"))\n # or with force_instance:\n client = AsyncHTTPClient(force_instance=True,\n defaults=dict(user_agent=\"MyUserAgent\"))\n\n .. 
versionchanged:: 5.0\n The ``io_loop`` argument (deprecated since version 4.1) has been removed.\n\n \"\"\"\n\n _instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]\n\n @classmethod\n def configurable_base(cls) -> Type[Configurable]:\n return AsyncHTTPClient\n\n @classmethod\n def configurable_default(cls) -> Type[Configurable]:\n from tornado.simple_httpclient import SimpleAsyncHTTPClient\n\n return SimpleAsyncHTTPClient\n\n @classmethod\n def _async_clients(cls) -> Dict[IOLoop, \"AsyncHTTPClient\"]:\n attr_name = \"_async_client_dict_\" + cls.__name__\n if not hasattr(cls, attr_name):\n setattr(cls, attr_name, weakref.WeakKeyDictionary())\n return getattr(cls, attr_name)\n\n def __new__(cls, force_instance: bool = False, **kwargs: Any) -> \"AsyncHTTPClient\":\n io_loop = IOLoop.current()\n if force_instance:\n instance_cache = None\n else:\n instance_cache = cls._async_clients()\n if instance_cache is not None and io_loop in instance_cache:\n return instance_cache[io_loop]\n instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore\n # Make sure the instance knows which cache to remove itself from.\n # It can't simply call _async_clients() because we may be in\n # __new__(AsyncHTTPClient) but instance.__class__ may be\n # SimpleAsyncHTTPClient.\n instance._instance_cache = instance_cache\n if instance_cache is not None:\n instance_cache[instance.io_loop] = instance\n return instance\n\n def initialize(self, defaults: Dict[str, Any] = None) -> None:\n self.io_loop = IOLoop.current()\n self.defaults = dict(HTTPRequest._DEFAULTS)\n if defaults is not None:\n self.defaults.update(defaults)\n self._closed = False\n\n def close(self) -> None:\n \"\"\"Destroys this HTTP client, freeing any file descriptors used.\n\n This method is **not needed in normal use** due to the way\n that `AsyncHTTPClient` objects are transparently reused.\n ``close()`` is generally only necessary when either the\n `.IOLoop` is also being closed, or the ``force_instance=True``\n argument was used when creating the `AsyncHTTPClient`.\n\n No other methods may be called on the `AsyncHTTPClient` after\n ``close()``.\n\n \"\"\"\n if self._closed:\n return\n self._closed = True\n if self._instance_cache is not None:\n cached_val = self._instance_cache.pop(self.io_loop, None)\n # If there's an object other than self in the instance\n # cache for our IOLoop, something has gotten mixed up. A\n # value of None appears to be possible when this is called\n # from a destructor (HTTPClient.__del__) as the weakref\n # gets cleared before the destructor runs.\n if cached_val is not None and cached_val is not self:\n raise RuntimeError(\"inconsistent AsyncHTTPClient cache\")\n\n def fetch(\n self,\n request: Union[str, \"HTTPRequest\"],\n raise_error: bool = True,\n **kwargs: Any\n ) -> Awaitable[\"HTTPResponse\"]:\n \"\"\"Executes a request, asynchronously returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n This method returns a `.Future` whose result is an\n `HTTPResponse`. By default, the ``Future`` will raise an\n `HTTPError` if the request returned a non-200 response code\n (other errors may also be raised if the server could not be\n contacted). 
Instead, if ``raise_error`` is set to False, the\n response will always be returned regardless of the response\n code.\n\n If a ``callback`` is given, it will be invoked with the `HTTPResponse`.\n In the callback interface, `HTTPError` is not automatically raised.\n Instead, you must check the response's ``error`` attribute or\n call its `~HTTPResponse.rethrow` method.\n\n .. versionchanged:: 6.0\n\n The ``callback`` argument was removed. Use the returned\n `.Future` instead.\n\n The ``raise_error=False`` argument only affects the\n `HTTPError` raised when a non-200 response code is used,\n instead of suppressing all errors.\n \"\"\"\n if self._closed:\n raise RuntimeError(\"fetch() called on closed AsyncHTTPClient\")\n if not isinstance(request, HTTPRequest):\n request = HTTPRequest(url=request, **kwargs)\n else:\n if kwargs:\n raise ValueError(\n \"kwargs can't be used if request is an HTTPRequest object\"\n )\n # We may modify this (to add Host, Accept-Encoding, etc),\n # so make sure we don't modify the caller's object. This is also\n # where normal dicts get converted to HTTPHeaders objects.\n request.headers = httputil.HTTPHeaders(request.headers)\n request_proxy = _RequestProxy(request, self.defaults)\n future = Future() # type: Future[HTTPResponse]\n\n def handle_response(response: \"HTTPResponse\") -> None:\n if response.error:\n if raise_error or not response._error_is_response_code:\n future_set_exception_unless_cancelled(future, response.error)\n return\n future_set_result_unless_cancelled(future, response)\n\n self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)\n return future\n\n def fetch_impl(\n self, request: \"HTTPRequest\", callback: Callable[[\"HTTPResponse\"], None]\n ) -> None:\n raise NotImplementedError()\n\n @classmethod\n def configure(\n cls, impl: \"Union[None, str, Type[Configurable]]\", **kwargs: Any\n ) -> None:\n \"\"\"Configures the `AsyncHTTPClient` subclass to use.\n\n ``AsyncHTTPClient()`` actually creates an instance of a subclass.\n This method may be called with either a class object or the\n fully-qualified name of such a class (or ``None`` to use the default,\n ``SimpleAsyncHTTPClient``)\n\n If additional keyword arguments are given, they will be passed\n to the constructor of each subclass instance created. The\n keyword argument ``max_clients`` determines the maximum number\n of simultaneous `~AsyncHTTPClient.fetch()` operations that can\n execute in parallel on each `.IOLoop`. 
Additional arguments\n may be supported depending on the implementation class in use.\n\n Example::\n\n AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n \"\"\"\n super(AsyncHTTPClient, cls).configure(impl, **kwargs)\n\n\nclass HTTPRequest(object):\n \"\"\"HTTP client request object.\"\"\"\n\n _headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]\n\n # Default values for HTTPRequest parameters.\n # Merged with the values on the request object by AsyncHTTPClient\n # implementations.\n _DEFAULTS = dict(\n connect_timeout=20.0,\n request_timeout=20.0,\n follow_redirects=True,\n max_redirects=5,\n decompress_response=True,\n proxy_password=\"\",\n allow_nonstandard_methods=False,\n validate_cert=True,\n )\n\n def __init__(\n self,\n url: str,\n method: str = \"GET\",\n headers: Union[Dict[str, str], httputil.HTTPHeaders] = None,\n body: Union[bytes, str] = None,\n auth_username: str = None,\n auth_password: str = None,\n auth_mode: str = None,\n connect_timeout: float = None,\n request_timeout: float = None,\n if_modified_since: Union[float, datetime.datetime] = None,\n follow_redirects: bool = None,\n max_redirects: int = None,\n user_agent: str = None,\n use_gzip: bool = None,\n network_interface: str = None,\n streaming_callback: Callable[[bytes], None] = None,\n header_callback: Callable[[str], None] = None,\n prepare_curl_callback: Callable[[Any], None] = None,\n proxy_host: str = None,\n proxy_port: int = None,\n proxy_username: str = None,\n proxy_password: str = None,\n proxy_auth_mode: str = None,\n allow_nonstandard_methods: bool = None,\n validate_cert: bool = None,\n ca_certs: str = None,\n allow_ipv6: bool = None,\n client_key: str = None,\n client_cert: str = None,\n body_producer: Callable[[Callable[[bytes], None]], \"Future[None]\"] = None,\n expect_100_continue: bool = False,\n decompress_response: bool = None,\n ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,\n ) -> None:\n r\"\"\"All parameters except ``url`` are optional.\n\n :arg str url: URL to fetch\n :arg str method: HTTP method, e.g. \"GET\" or \"POST\"\n :arg headers: Additional HTTP headers to pass on the request\n :type headers: `~tornado.httputil.HTTPHeaders` or `dict`\n :arg body: HTTP request body as a string (byte or unicode; if unicode\n the utf-8 encoding will be used)\n :arg body_producer: Callable used for lazy/asynchronous request bodies.\n It is called with one argument, a ``write`` function, and should\n return a `.Future`. It should call the write function with new\n data as it becomes available. The write function returns a\n `.Future` which can be used for flow control.\n Only one of ``body`` and ``body_producer`` may\n be specified. ``body_producer`` is not supported on\n ``curl_httpclient``. When using ``body_producer`` it is recommended\n to pass a ``Content-Length`` in the headers as otherwise chunked\n encoding will be used, and many servers do not support chunked\n encoding on requests. 
New in Tornado 4.0\n :arg str auth_username: Username for HTTP authentication\n :arg str auth_password: Password for HTTP authentication\n :arg str auth_mode: Authentication mode; default is \"basic\".\n Allowed values are implementation-defined; ``curl_httpclient``\n supports \"basic\" and \"digest\"; ``simple_httpclient`` only supports\n \"basic\"\n :arg float connect_timeout: Timeout for initial connection in seconds,\n default 20 seconds\n :arg float request_timeout: Timeout for entire request in seconds,\n default 20 seconds\n :arg if_modified_since: Timestamp for ``If-Modified-Since`` header\n :type if_modified_since: `datetime` or `float`\n :arg bool follow_redirects: Should redirects be followed automatically\n or return the 3xx response? Default True.\n :arg int max_redirects: Limit for ``follow_redirects``, default 5.\n :arg str user_agent: String to send as ``User-Agent`` header\n :arg bool decompress_response: Request a compressed response from\n the server and decompress it after downloading. Default is True.\n New in Tornado 4.0.\n :arg bool use_gzip: Deprecated alias for ``decompress_response``\n since Tornado 4.0.\n :arg str network_interface: Network interface or source IP to use for request.\n See ``curl_httpclient`` note below.\n :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will\n be run with each chunk of data as it is received, and\n ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in\n the final response.\n :arg collections.abc.Callable header_callback: If set, ``header_callback`` will\n be run with each header line as it is received (including the\n first line, e.g. ``HTTP/1.0 200 OK\\r\\n``, and a final line\n containing only ``\\r\\n``. All lines include the trailing newline\n characters). ``HTTPResponse.headers`` will be empty in the final\n response. This is most useful in conjunction with\n ``streaming_callback``, because it's the only way to get access to\n header data while the request is in progress.\n :arg collections.abc.Callable prepare_curl_callback: If set, will be called with\n a ``pycurl.Curl`` object to allow the application to make additional\n ``setopt`` calls.\n :arg str proxy_host: HTTP proxy hostname. To use proxies,\n ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,\n ``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are\n currently only supported with ``curl_httpclient``.\n :arg int proxy_port: HTTP proxy port\n :arg str proxy_username: HTTP proxy username\n :arg str proxy_password: HTTP proxy password\n :arg str proxy_auth_mode: HTTP proxy Authentication mode;\n default is \"basic\". supports \"basic\" and \"digest\"\n :arg bool allow_nonstandard_methods: Allow unknown values for ``method``\n argument? Default is False.\n :arg bool validate_cert: For HTTPS requests, validate the server's\n certificate? Default is True.\n :arg str ca_certs: filename of CA certificates in PEM format,\n or None to use defaults. See note below when used with\n ``curl_httpclient``.\n :arg str client_key: Filename for client SSL key, if any. See\n note below when used with ``curl_httpclient``.\n :arg str client_cert: Filename for client SSL certificate, if any.\n See note below when used with ``curl_httpclient``.\n :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in\n ``simple_httpclient`` (unsupported by ``curl_httpclient``).\n Overrides ``validate_cert``, ``ca_certs``, ``client_key``,\n and ``client_cert``.\n :arg bool allow_ipv6: Use IPv6 when available? 
Default is True.\n :arg bool expect_100_continue: If true, send the\n ``Expect: 100-continue`` header and wait for a continue response\n before sending the request body. Only supported with\n ``simple_httpclient``.\n\n .. note::\n\n When using ``curl_httpclient`` certain options may be\n inherited by subsequent fetches because ``pycurl`` does\n not allow them to be cleanly reset. This applies to the\n ``ca_certs``, ``client_key``, ``client_cert``, and\n ``network_interface`` arguments. If you use these\n options, you should pass them on every request (you don't\n have to always use the same values, but it's not possible\n to mix requests that specify these options with ones that\n use the defaults).\n\n .. versionadded:: 3.1\n The ``auth_mode`` argument.\n\n .. versionadded:: 4.0\n The ``body_producer`` and ``expect_100_continue`` arguments.\n\n .. versionadded:: 4.2\n The ``ssl_options`` argument.\n\n .. versionadded:: 4.5\n The ``proxy_auth_mode`` argument.\n \"\"\"\n # Note that some of these attributes go through property setters\n # defined below.\n self.headers = headers\n if if_modified_since:\n self.headers[\"If-Modified-Since\"] = httputil.format_timestamp(\n if_modified_since\n )\n self.proxy_host = proxy_host\n self.proxy_port = proxy_port\n self.proxy_username = proxy_username\n self.proxy_password = proxy_password\n self.proxy_auth_mode = proxy_auth_mode\n self.url = url\n self.method = method\n self.body = body\n self.body_producer = body_producer\n self.auth_username = auth_username\n self.auth_password = auth_password\n self.auth_mode = auth_mode\n self.connect_timeout = connect_timeout\n self.request_timeout = request_timeout\n self.follow_redirects = follow_redirects\n self.max_redirects = max_redirects\n self.user_agent = user_agent\n if decompress_response is not None:\n self.decompress_response = decompress_response # type: Optional[bool]\n else:\n self.decompress_response = use_gzip\n self.network_interface = network_interface\n self.streaming_callback = streaming_callback\n self.header_callback = header_callback\n self.prepare_curl_callback = prepare_curl_callback\n self.allow_nonstandard_methods = allow_nonstandard_methods\n self.validate_cert = validate_cert\n self.ca_certs = ca_certs\n self.allow_ipv6 = allow_ipv6\n self.client_key = client_key\n self.client_cert = client_cert\n self.ssl_options = ssl_options\n self.expect_100_continue = expect_100_continue\n self.start_time = time.time()\n\n @property\n def headers(self) -> httputil.HTTPHeaders:\n # TODO: headers may actually be a plain dict until fairly late in\n # the process (AsyncHTTPClient.fetch), but practically speaking,\n # whenever the property is used they're already HTTPHeaders.\n return self._headers # type: ignore\n\n @headers.setter\n def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:\n if value is None:\n self._headers = httputil.HTTPHeaders()\n else:\n self._headers = value # type: ignore\n\n @property\n def body(self) -> bytes:\n return self._body\n\n @body.setter\n def body(self, value: Union[bytes, str]) -> None:\n self._body = utf8(value)\n\n\nclass HTTPResponse(object):\n \"\"\"HTTP Response object.\n\n Attributes:\n\n * ``request``: HTTPRequest object\n\n * ``code``: numeric HTTP status code, e.g. 
200 or 404\n\n * ``reason``: human-readable reason phrase describing the status code\n\n * ``headers``: `tornado.httputil.HTTPHeaders` object\n\n * ``effective_url``: final location of the resource after following any\n redirects\n\n * ``buffer``: ``cStringIO`` object for response body\n\n * ``body``: response body as bytes (created on demand from ``self.buffer``)\n\n * ``error``: Exception object, if any\n\n * ``request_time``: seconds from request start to finish. Includes all\n network operations from DNS resolution to receiving the last byte of\n data. Does not include time spent in the queue (due to the\n ``max_clients`` option). If redirects were followed, only includes\n the final request.\n\n * ``start_time``: Time at which the HTTP operation started, based on\n `time.time` (not the monotonic clock used by `.IOLoop.time`). May\n be ``None`` if the request timed out while in the queue.\n\n * ``time_info``: dictionary of diagnostic timing information from the\n request. Available data are subject to change, but currently uses timings\n available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,\n plus ``queue``, which is the delay (if any) introduced by waiting for\n a slot under `AsyncHTTPClient`'s ``max_clients`` setting.\n\n .. versionadded:: 5.1\n\n Added the ``start_time`` attribute.\n\n .. versionchanged:: 5.1\n\n The ``request_time`` attribute previously included time spent in the queue\n for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time\n is excluded in both implementations. ``request_time`` is now more accurate for\n ``curl_httpclient`` because it uses a monotonic clock when available.\n \"\"\"\n\n # I'm not sure why these don't get type-inferred from the references in __init__.\n error = None # type: Optional[BaseException]\n _error_is_response_code = False\n request = None # type: HTTPRequest\n\n def __init__(\n self,\n request: HTTPRequest,\n code: int,\n headers: httputil.HTTPHeaders = None,\n buffer: BytesIO = None,\n effective_url: str = None,\n error: BaseException = None,\n request_time: float = None,\n time_info: Dict[str, float] = None,\n reason: str = None,\n start_time: float = None,\n ) -> None:\n if isinstance(request, _RequestProxy):\n self.request = request.request\n else:\n self.request = request\n self.code = code\n self.reason = reason or httputil.responses.get(code, \"Unknown\")\n if headers is not None:\n self.headers = headers\n else:\n self.headers = httputil.HTTPHeaders()\n self.buffer = buffer\n self._body = None # type: Optional[bytes]\n if effective_url is None:\n self.effective_url = request.url\n else:\n self.effective_url = effective_url\n self._error_is_response_code = False\n if error is None:\n if self.code < 200 or self.code >= 300:\n self._error_is_response_code = True\n self.error = HTTPError(self.code, message=self.reason, response=self)\n else:\n self.error = None\n else:\n self.error = error\n self.start_time = start_time\n self.request_time = request_time\n self.time_info = time_info or {}\n\n @property\n def body(self) -> bytes:\n if self.buffer is None:\n return b\"\"\n elif self._body is None:\n self._body = self.buffer.getvalue()\n\n return self._body\n\n def rethrow(self) -> None:\n \"\"\"If there was an error on the request, raise an `HTTPError`.\"\"\"\n if self.error:\n raise self.error\n\n def __repr__(self) -> str:\n args = \",\".join(\"%s=%r\" % i for i in sorted(self.__dict__.items()))\n return \"%s(%s)\" % (self.__class__.__name__, args)\n\n\nclass HTTPClientError(Exception):\n 
\"\"\"Exception thrown for an unsuccessful HTTP request.\n\n Attributes:\n\n * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is\n used when no HTTP response was received, e.g. for a timeout.\n\n * ``response`` - `HTTPResponse` object, if any.\n\n Note that if ``follow_redirects`` is False, redirects become HTTPErrors,\n and you can look at ``error.response.headers['Location']`` to see the\n destination of the redirect.\n\n .. versionchanged:: 5.1\n\n Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with\n `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains\n as an alias.\n \"\"\"\n\n def __init__(\n self, code: int, message: str = None, response: HTTPResponse = None\n ) -> None:\n self.code = code\n self.message = message or httputil.responses.get(code, \"Unknown\")\n self.response = response\n super(HTTPClientError, self).__init__(code, message, response)\n\n def __str__(self) -> str:\n return \"HTTP %d: %s\" % (self.code, self.message)\n\n # There is a cyclic reference between self and self.response,\n # which breaks the default __repr__ implementation.\n # (especially on pypy, which doesn't have the same recursion\n # detection as cpython).\n __repr__ = __str__\n\n\nHTTPError = HTTPClientError\n\n\nclass _RequestProxy(object):\n \"\"\"Combines an object with a dictionary of defaults.\n\n Used internally by AsyncHTTPClient implementations.\n \"\"\"\n\n def __init__(\n self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]\n ) -> None:\n self.request = request\n self.defaults = defaults\n\n def __getattr__(self, name: str) -> Any:\n request_attr = getattr(self.request, name)\n if request_attr is not None:\n return request_attr\n elif self.defaults is not None:\n return self.defaults.get(name, None)\n else:\n return None\n\n\ndef main() -> None:\n from tornado.options import define, options, parse_command_line\n\n define(\"print_headers\", type=bool, default=False)\n define(\"print_body\", type=bool, default=True)\n define(\"follow_redirects\", type=bool, default=True)\n define(\"validate_cert\", type=bool, default=True)\n define(\"proxy_host\", type=str)\n define(\"proxy_port\", type=int)\n args = parse_command_line()\n client = HTTPClient()\n for arg in args:\n try:\n response = client.fetch(\n arg,\n follow_redirects=options.follow_redirects,\n validate_cert=options.validate_cert,\n proxy_host=options.proxy_host,\n proxy_port=options.proxy_port,\n )\n except HTTPError as e:\n if e.response is not None:\n response = e.response\n else:\n raise\n if options.print_headers:\n print(response.headers)\n if options.print_body:\n print(native_str(response.body))\n client.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tornado/httpclient.py" } ]
diff --git a/tornado/httpclient.py b/tornado/httpclient.py index 33abe2e16e..882600af82 100644 --- a/tornado/httpclient.py +++ b/tornado/httpclient.py @@ -665,7 +665,7 @@ def __init__( @property def body(self) -> bytes: if self.buffer is None: - raise ValueError("body not set") + return b"" elif self._body is None: self._body = self.buffer.getvalue()
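The diff above relaxes `HTTPResponse.body` so that a response created without a buffer yields an empty byte string instead of raising. A minimal sketch of the resulting behaviour, using only the constructor arguments shown in the file above; the URL is a placeholder:

```python
# Hypothetical illustration: with the patch, reading .body on a response
# that carries no buffer (e.g. an error response built without a body)
# yields b"" rather than raising ValueError.
from io import BytesIO

from tornado.httpclient import HTTPRequest, HTTPResponse

request = HTTPRequest("http://example.com/")

# Response without a buffer: body is now simply empty.
empty = HTTPResponse(request, 599)
assert empty.body == b""

# Response with a buffer behaves as before.
filled = HTTPResponse(request, 200, buffer=BytesIO(b"hello"))
assert filled.body == b"hello"
```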
conan-io__conan-2921
local cache inconsistent after enabling short_paths in a recipe

To help us debug your issue please explain:

- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.

Conan version 1.3.3, Windows 10.

With a package in the local cache whose recipe does NOT have `short_paths=True`, modify the recipe in the normal development folder, set `short_paths=True`, and run conan create. Folders in the local cache become inconsistent, showing both the folders from the previous conan create run and .conan_link files pointing to the short-paths folders. This seems not to affect conan tool behavior when running commands, and works well if `short_paths` is removed once again.
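A minimal sketch of the recipe toggle described above, assuming a hypothetical package `demo/0.1`; the only change between the two `conan create . user/testing` runs is the `short_paths` attribute:

```python
# conanfile.py -- first run `conan create . user/testing` with short_paths
# left unset (or False), then set short_paths = True and run it again.
# After the second run the cache folders for this reference contain both
# the old real folders and the new .conan_link files.
from conans import ConanFile


class DemoConan(ConanFile):
    name = "demo"          # hypothetical package name
    version = "0.1"
    short_paths = True     # toggled between the two runs

    def build(self):
        self.output.info("building in %s" % self.build_folder)
```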
[ { "content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n # In win these variables should exist and point to user directory, which\n # must exist. Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n drive = os.path.splitdrive(path)[0]\n short_home = drive + \"/.conan\"\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n username = os.getenv(\"USERDOMAIN\")\n domainname = \"%s\\%s\" % (username, os.environ[\"USERNAME\"]) if username else os.environ[\"USERNAME\"]\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet\n except subprocess.CalledProcessError:\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. 
It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef ignore_long_path_files(src_folder, build_folder, output):\n def _filter(src, files):\n filtered_files = []\n for the_file in files:\n source_path = os.path.join(src, the_file)\n # Without storage path, just relative\n rel_path = os.path.relpath(source_path, src_folder)\n dest_path = os.path.normpath(os.path.join(build_folder, rel_path))\n # it is NOT that \"/\" is counted as \"\\\\\" so it counts double\n # seems a bug in python, overflows paths near the limit of 260,\n if len(dest_path) >= 249:\n filtered_files.append(the_file)\n output.warn(\"Filename too long, file excluded: %s\" % dest_path)\n return filtered_files\n return _filter\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n", "path": "conans/util/windows.py" } ]
[ { "content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n # In win these variables should exist and point to user directory, which\n # must exist. Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n if os.path.exists(path):\n rmdir(path)\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n drive = os.path.splitdrive(path)[0]\n short_home = drive + \"/.conan\"\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n username = os.getenv(\"USERDOMAIN\")\n domainname = \"%s\\%s\" % (username, os.environ[\"USERNAME\"]) if username else os.environ[\"USERNAME\"]\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet\n except subprocess.CalledProcessError:\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. 
It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef ignore_long_path_files(src_folder, build_folder, output):\n def _filter(src, files):\n filtered_files = []\n for the_file in files:\n source_path = os.path.join(src, the_file)\n # Without storage path, just relative\n rel_path = os.path.relpath(source_path, src_folder)\n dest_path = os.path.normpath(os.path.join(build_folder, rel_path))\n # it is NOT that \"/\" is counted as \"\\\\\" so it counts double\n # seems a bug in python, overflows paths near the limit of 260,\n if len(dest_path) >= 249:\n filtered_files.append(the_file)\n output.warn(\"Filename too long, file excluded: %s\" % dest_path)\n return filtered_files\n return _filter\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n", "path": "conans/util/windows.py" } ]
diff --git a/conans/test/functional/short_paths_test.py b/conans/test/functional/short_paths_test.py new file mode 100644 index 00000000000..6d9f55b6832 --- /dev/null +++ b/conans/test/functional/short_paths_test.py @@ -0,0 +1,67 @@ +import os +import platform +import unittest + +from conans.model.ref import ConanFileReference +from conans.test.utils.tools import TestClient + + +class ShortPathsTest(unittest.TestCase): + + @unittest.skipUnless(platform.system() == "Windows", "Requires Windows") + def inconsistent_cache_test(self): + conanfile = """ +import os +from conans import ConanFile, tools + + +class TestConan(ConanFile): + name = "test" + version = "1.0" + short_paths = {0} + exports_sources = "source_file.cpp" + + def source(self): + for item in os.listdir(self.source_folder): + self.output.info("SOURCE: " + str(item)) + def build(self): + tools.save(os.path.join(self.build_folder, "artifact"), "") + for item in os.listdir(self.build_folder): + self.output.info("BUILD: " + str(item)) + def package(self): + self.copy("source_file.cpp") + self.copy("artifact") + for item in os.listdir(self.build_folder): + self.output.info("PACKAGE: " + str(item)) +""" + + client = TestClient() + client.save({"conanfile.py": conanfile.format("False"), + "source_file.cpp": ""}) + client.run("create . danimtb/testing") + conan_ref = ConanFileReference("test", "1.0", "danimtb", "testing") + source_folder = os.path.join(client.client_cache.conan(conan_ref), "source") + build_folder = os.path.join(client.client_cache.conan(conan_ref), "build", + "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + package_folder = os.path.join(client.client_cache.conan(conan_ref), "package", + "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + self.assertIn("SOURCE: source_file.cpp", client.out) + self.assertEqual(["source_file.cpp"], os.listdir(source_folder)) + self.assertIn("BUILD: source_file.cpp", client.out) + self.assertIn("BUILD: artifact", client.out) + self.assertEqual(["artifact", "conanbuildinfo.txt", "conaninfo.txt", "source_file.cpp"], + os.listdir(build_folder)) + self.assertIn("PACKAGE: source_file.cpp", client.out) + self.assertIn("PACKAGE: artifact", client.out) + self.assertEqual(["artifact", "conaninfo.txt", "conanmanifest.txt", "source_file.cpp"], + os.listdir(package_folder)) + client.save({"conanfile.py": conanfile.format("True")}) + client.run("create . danimtb/testing") + self.assertIn("SOURCE: source_file.cpp", client.out) + self.assertEqual([".conan_link"], os.listdir(source_folder)) + self.assertIn("BUILD: source_file.cpp", client.out) + self.assertIn("BUILD: artifact", client.out) + self.assertEqual([".conan_link"], os.listdir(build_folder)) + self.assertIn("PACKAGE: source_file.cpp", client.out) + self.assertIn("PACKAGE: artifact", client.out) + self.assertEqual([".conan_link"], os.listdir(package_folder)) diff --git a/conans/util/windows.py b/conans/util/windows.py index 635597a8051..636fc672e87 100644 --- a/conans/util/windows.py +++ b/conans/util/windows.py @@ -45,6 +45,9 @@ def path_shortener(path, short_paths): elif short_paths is None: return path + if os.path.exists(path): + rmdir(path) + short_home = os.getenv("CONAN_USER_HOME_SHORT") if not short_home: drive = os.path.splitdrive(path)[0]
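The core of the fix is the early `rmdir` in `path_shortener`: an existing real cache folder left over from a run without `short_paths` is removed before the `.conan_link` redirect is written. A standalone sketch of that behaviour (not conan's actual helper; `shorten` and its arguments are illustrative):

```python
# Sketch of the patched behaviour: wipe a stale real folder, then write the
# redirect link, so old contents and the .conan_link can never coexist.
import os
import shutil
import tempfile

CONAN_LINK = ".conan_link"


def shorten(path, short_root):
    if os.path.exists(path):        # left over from a run without short_paths
        shutil.rmtree(path)
    os.makedirs(path)
    os.makedirs(short_root, exist_ok=True)
    redirect = tempfile.mkdtemp(dir=short_root)
    with open(os.path.join(path, CONAN_LINK), "w") as link:
        link.write(redirect)
    return redirect
```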
googleapis__google-cloud-python-5856
Request to release GCS Python library

Hi, is it possible to release the Storage client library for Python? I'd like the new method `get_service_account_email` to be available, unless there are concerns.
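For reference, a hedged sketch of how the requested method is used once released; the project name is a placeholder and credentials are assumed to be configured:

```python
# Hypothetical usage of the method added in google-cloud-storage 1.11.0.
# Requires application default credentials with access to the project.
from google.cloud import storage

client = storage.Client(project="my-project")  # "my-project" is a placeholder
email = client.get_service_account_email()
print(email)  # e.g. a "...@gs-project-accounts.iam.gserviceaccount.com" address
```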
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-storage'\ndescription = 'Google Cloud Storage API client library'\nversion = '1.10.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-cloud-core<0.29dev,>=0.28.0',\n 'google-api-core<2.0.0dev,>=0.1.1',\n 'google-resumable-media>=0.3.1',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-storage'\ndescription = 'Google Cloud Storage API client library'\nversion = '1.11.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-cloud-core<0.29dev,>=0.28.0',\n 'google-api-core<2.0.0dev,>=0.1.1',\n 'google-resumable-media>=0.3.1',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py" } ]
diff --git a/storage/CHANGELOG.md b/storage/CHANGELOG.md index 002e92db7c09..b1b0ef25782e 100644 --- a/storage/CHANGELOG.md +++ b/storage/CHANGELOG.md @@ -4,11 +4,32 @@ [1]: https://pypi.org/project/google-cloud-storage/#history +## 1.11.0 + +### Implementation Changes +- Preserve message / args from an `InvalidResponse`. (#5492) +- Fix generating signed urls for blobs with non-ascii names. (#5625) +- Move bucket location specification to `Bucket.create`; deprecate `Bucket.location` setter (#5808) + +### New Features +- Add `Client.get_service_account_email`. (#5765) + +### Documentation +- Clarify `None` values for resource-backed properties. (#5509) +- Elaborate docs for `{Bucket,Blob}.make_{public,private}`; note how to enable anonymous accesss to `Blob.public_url`. (#5767) + +### Internal / Testing Changes +- Harden `create_bucket` systest against 429 responses. (#5535) +- Add system test: signed URLs w/ non-ASCII blob name. (#5626) +- Harden `tearDownModule` against 429 TooManyRequests. (#5701) +- Retry `notification.create()` on `503 ServiceUnavailable`. (#5741) +- Fix failing KMS system tests. (#5832, #5837, #5860) + ## 1.10.0 ### New Features - Add support for KMS keys (#5259) -- Add '{Blob,Bucket}make_private' method (#5336) +- Add `{Blob,Bucket}make_private` method (#5336) ### Internal / Testing Changes - Modify system tests to use prerelease versions of grpcio (#5304) diff --git a/storage/setup.py b/storage/setup.py index a0ce64d4aca7..ac046e924b5e 100644 --- a/storage/setup.py +++ b/storage/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-storage' description = 'Google Cloud Storage API client library' -version = '1.10.0' +version = '1.11.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'
twisted__twisted-12069
spawnProcess() passes incorrect environment to subprocess when env=None and posix_spawnp() is used

[Documentation on reactor.spawnProcess](https://docs.twisted.org/en/stable/api/twisted.internet.interfaces.IReactorProcess.html) says the following about the env parameter:

```
env is None: On POSIX: pass os.environ
```

However, twisted has [this code](https://github.com/twisted/twisted/blob/68f112f1eecb4613a3b678314a5479464c184ab4/src/twisted/internet/process.py#L881) in the code path leading to a call to posix_spawnp():

```
if environment is None:
    environment = {}
```

This leads to the subprocess being initialized with an empty environment even though `os.environ` was expected.

**Describe how to cause this behavior**

There's a PR with automated tests added to Twisted.

**Describe the correct behavior you'd like to see**

The subprocess having the parent process environment when invoked via `reactor.spawnProcess(..., env=None)`.

**Testing environment**

- Operating System and Version: Debian 12
- Twisted version: 23.10.0
- Reactor: default on Linux

**Additional context**

Probably a regression since 23.8.0, when posix_spawnp was enabled.
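A minimal reproduction sketch under the report's assumptions (Linux, default reactor, `/usr/bin/env` available); the protocol class and its printout are illustrative, not part of the Twisted PR's tests:

```python
# Hypothetical reproduction: spawn /usr/bin/env with env=None and collect
# its stdout. Per the documented contract the child should inherit
# os.environ; with the bug, it receives an empty environment instead.
import os

from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol


class CaptureEnv(ProcessProtocol):
    def __init__(self):
        self.output = b""

    def outReceived(self, data):
        self.output += data

    def processEnded(self, reason):
        print("child saw %d environment bytes (parent has %d vars)"
              % (len(self.output), len(os.environ)))
        reactor.stop()


proto = CaptureEnv()
reactor.spawnProcess(proto, "/usr/bin/env", ["env"], env=None)
reactor.run()
```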
[ { "content": "# -*- test-case-name: twisted.test.test_process -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUNIX Process management.\n\nDo NOT use this module directly - use reactor.spawnProcess() instead.\n\nMaintainer: Itamar Shtull-Trauring\n\"\"\"\nfrom __future__ import annotations\n\nimport errno\nimport gc\nimport io\nimport os\nimport signal\nimport stat\nimport sys\nimport traceback\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple\n\n_PS_CLOSE: int\n_PS_DUP2: int\n\nif not TYPE_CHECKING:\n try:\n from os import POSIX_SPAWN_CLOSE as _PS_CLOSE, POSIX_SPAWN_DUP2 as _PS_DUP2\n except ImportError:\n pass\n\nfrom zope.interface import implementer\n\nfrom twisted.internet import abstract, error, fdesc\nfrom twisted.internet._baseprocess import BaseProcess\nfrom twisted.internet.interfaces import IProcessTransport\nfrom twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST\nfrom twisted.python import failure, log\nfrom twisted.python.runtime import platform\nfrom twisted.python.util import switchUID\n\nif platform.isWindows():\n raise ImportError(\n \"twisted.internet.process does not work on Windows. \"\n \"Use the reactor.spawnProcess() API instead.\"\n )\n\ntry:\n import pty as _pty\nexcept ImportError:\n pty = None\nelse:\n pty = _pty\n\ntry:\n import fcntl as _fcntl\n import termios\nexcept ImportError:\n fcntl = None\nelse:\n fcntl = _fcntl\n\n# Some people were importing this, which is incorrect, just keeping it\n# here for backwards compatibility:\nProcessExitedAlready = error.ProcessExitedAlready\n\nreapProcessHandlers: Dict[int, _BaseProcess] = {}\n\n\ndef reapAllProcesses() -> None:\n \"\"\"\n Reap all registered processes.\n \"\"\"\n # Coerce this to a list, as reaping the process changes the dictionary and\n # causes a \"size changed during iteration\" exception\n for process in list(reapProcessHandlers.values()):\n process.reapProcess()\n\n\ndef registerReapProcessHandler(pid, process):\n \"\"\"\n Register a process handler for the given pid, in case L{reapAllProcesses}\n is called.\n\n @param pid: the pid of the process.\n @param process: a process handler.\n \"\"\"\n if pid in reapProcessHandlers:\n raise RuntimeError(\"Try to register an already registered process.\")\n try:\n auxPID, status = os.waitpid(pid, os.WNOHANG)\n except BaseException:\n log.msg(f\"Failed to reap {pid}:\")\n log.err()\n\n if pid is None:\n return\n\n auxPID = None\n if auxPID:\n process.processEnded(status)\n else:\n # if auxPID is 0, there are children but none have exited\n reapProcessHandlers[pid] = process\n\n\ndef unregisterReapProcessHandler(pid, process):\n \"\"\"\n Unregister a process handler previously registered with\n L{registerReapProcessHandler}.\n \"\"\"\n if not (pid in reapProcessHandlers and reapProcessHandlers[pid] == process):\n raise RuntimeError(\"Try to unregister a process not registered.\")\n del reapProcessHandlers[pid]\n\n\nclass ProcessWriter(abstract.FileDescriptor):\n \"\"\"\n (Internal) Helper class to write into a Process's input pipe.\n\n I am a helper which describes a selectable asynchronous writer to a\n process's input pipe, including stdin.\n\n @ivar enableReadHack: A flag which determines how readability on this\n write descriptor will be handled. If C{True}, then readability may\n indicate the reader for this write descriptor has been closed (ie,\n the connection has been lost). 
If C{False}, then readability events\n are ignored.\n \"\"\"\n\n connected = 1\n ic = 0\n enableReadHack = False\n\n def __init__(self, reactor, proc, name, fileno, forceReadHack=False):\n \"\"\"\n Initialize, specifying a Process instance to connect to.\n \"\"\"\n abstract.FileDescriptor.__init__(self, reactor)\n fdesc.setNonBlocking(fileno)\n self.proc = proc\n self.name = name\n self.fd = fileno\n\n if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):\n # If the fd is not a pipe, then the read hack is never\n # applicable. This case arises when ProcessWriter is used by\n # StandardIO and stdout is redirected to a normal file.\n self.enableReadHack = False\n elif forceReadHack:\n self.enableReadHack = True\n else:\n # Detect if this fd is actually a write-only fd. If it's\n # valid to read, don't try to detect closing via read.\n # This really only means that we cannot detect a TTY's write\n # pipe being closed.\n try:\n os.read(self.fileno(), 0)\n except OSError:\n # It's a write-only pipe end, enable hack\n self.enableReadHack = True\n\n if self.enableReadHack:\n self.startReading()\n\n def fileno(self):\n \"\"\"\n Return the fileno() of my process's stdin.\n \"\"\"\n return self.fd\n\n def writeSomeData(self, data):\n \"\"\"\n Write some data to the open process.\n \"\"\"\n rv = fdesc.writeToFD(self.fd, data)\n if rv == len(data) and self.enableReadHack:\n # If the send buffer is now empty and it is necessary to monitor\n # this descriptor for readability to detect close, try detecting\n # readability now.\n self.startReading()\n return rv\n\n def write(self, data):\n self.stopReading()\n abstract.FileDescriptor.write(self, data)\n\n def doRead(self):\n \"\"\"\n The only way a write pipe can become \"readable\" is at EOF, because the\n child has closed it, and we're using a reactor which doesn't\n distinguish between readable and closed (such as the select reactor).\n\n Except that's not true on linux < 2.6.11. It has the following\n characteristics: write pipe is completely empty => POLLOUT (writable in\n select), write pipe is not completely empty => POLLIN (readable in\n select), write pipe's reader closed => POLLIN|POLLERR (readable and\n writable in select)\n\n That's what this funky code is for. If linux was not broken, this\n function could be simply \"return CONNECTION_LOST\".\n \"\"\"\n if self.enableReadHack:\n return CONNECTION_LOST\n else:\n self.stopReading()\n\n def connectionLost(self, reason):\n \"\"\"\n See abstract.FileDescriptor.connectionLost.\n \"\"\"\n # At least on macOS 10.4, exiting while stdout is non-blocking can\n # result in data loss. 
For some reason putting the file descriptor\n # back into blocking mode seems to resolve this issue.\n fdesc.setBlocking(self.fd)\n\n abstract.FileDescriptor.connectionLost(self, reason)\n self.proc.childConnectionLost(self.name, reason)\n\n\nclass ProcessReader(abstract.FileDescriptor):\n \"\"\"\n ProcessReader\n\n I am a selectable representation of a process's output pipe, such as\n stdout and stderr.\n \"\"\"\n\n connected = True\n\n def __init__(self, reactor, proc, name, fileno):\n \"\"\"\n Initialize, specifying a process to connect to.\n \"\"\"\n abstract.FileDescriptor.__init__(self, reactor)\n fdesc.setNonBlocking(fileno)\n self.proc = proc\n self.name = name\n self.fd = fileno\n self.startReading()\n\n def fileno(self):\n \"\"\"\n Return the fileno() of my process's stderr.\n \"\"\"\n return self.fd\n\n def writeSomeData(self, data):\n # the only time this is actually called is after .loseConnection Any\n # actual write attempt would fail, so we must avoid that. This hack\n # allows us to use .loseConnection on both readers and writers.\n assert data == b\"\"\n return CONNECTION_LOST\n\n def doRead(self):\n \"\"\"\n This is called when the pipe becomes readable.\n \"\"\"\n return fdesc.readFromFD(self.fd, self.dataReceived)\n\n def dataReceived(self, data):\n self.proc.childDataReceived(self.name, data)\n\n def loseConnection(self):\n if self.connected and not self.disconnecting:\n self.disconnecting = 1\n self.stopReading()\n self.reactor.callLater(\n 0, self.connectionLost, failure.Failure(CONNECTION_DONE)\n )\n\n def connectionLost(self, reason):\n \"\"\"\n Close my end of the pipe, signal the Process (which signals the\n ProcessProtocol).\n \"\"\"\n abstract.FileDescriptor.connectionLost(self, reason)\n self.proc.childConnectionLost(self.name, reason)\n\n\nclass _BaseProcess(BaseProcess):\n \"\"\"\n Base class for Process and PTYProcess.\n \"\"\"\n\n status: Optional[int] = None\n pid = None\n\n def reapProcess(self):\n \"\"\"\n Try to reap a process (without blocking) via waitpid.\n\n This is called when sigchild is caught or a Process object loses its\n \"connection\" (stdout is closed) This ought to result in reaping all\n zombie processes, since it will be called twice as often as it needs\n to be.\n\n (Unfortunately, this is a slightly experimental approach, since\n UNIX has no way to be really sure that your process is going to\n go away w/o blocking. I don't want to block.)\n \"\"\"\n try:\n try:\n pid, status = os.waitpid(self.pid, os.WNOHANG)\n except OSError as e:\n if e.errno == errno.ECHILD:\n # no child process\n pid = None\n else:\n raise\n except BaseException:\n log.msg(f\"Failed to reap {self.pid}:\")\n log.err()\n pid = None\n if pid:\n unregisterReapProcessHandler(pid, self)\n self.processEnded(status)\n\n def _getReason(self, status):\n exitCode = sig = None\n if os.WIFEXITED(status):\n exitCode = os.WEXITSTATUS(status)\n else:\n sig = os.WTERMSIG(status)\n if exitCode or sig:\n return error.ProcessTerminated(exitCode, sig, status)\n return error.ProcessDone(status)\n\n def signalProcess(self, signalID):\n \"\"\"\n Send the given signal C{signalID} to the process. 
It'll translate a\n few signals ('HUP', 'STOP', 'INT', 'KILL', 'TERM') from a string\n representation to its int value, otherwise it'll pass directly the\n value provided\n\n @type signalID: C{str} or C{int}\n \"\"\"\n if signalID in (\"HUP\", \"STOP\", \"INT\", \"KILL\", \"TERM\"):\n signalID = getattr(signal, f\"SIG{signalID}\")\n if self.pid is None:\n raise ProcessExitedAlready()\n try:\n os.kill(self.pid, signalID)\n except OSError as e:\n if e.errno == errno.ESRCH:\n raise ProcessExitedAlready()\n else:\n raise\n\n def _resetSignalDisposition(self):\n # The Python interpreter ignores some signals, and our child\n # process will inherit that behaviour. To have a child process\n # that responds to signals normally, we need to reset our\n # child process's signal handling (just) after we fork and\n # before we execvpe.\n for signalnum in range(1, signal.NSIG):\n if signal.getsignal(signalnum) == signal.SIG_IGN:\n # Reset signal handling to the default\n signal.signal(signalnum, signal.SIG_DFL)\n\n def _trySpawnInsteadOfFork(\n self, path, uid, gid, executable, args, environment, kwargs\n ):\n \"\"\"\n Try to use posix_spawnp() instead of fork(), if possible.\n\n This implementation returns False because the non-PTY subclass\n implements the actual logic; we can't yet use this for pty processes.\n\n @return: a boolean indicating whether posix_spawnp() was used or not.\n \"\"\"\n return False\n\n def _fork(self, path, uid, gid, executable, args, environment, **kwargs):\n \"\"\"\n Fork and then exec sub-process.\n\n @param path: the path where to run the new process.\n @type path: L{bytes} or L{unicode}\n\n @param uid: if defined, the uid used to run the new process.\n @type uid: L{int}\n\n @param gid: if defined, the gid used to run the new process.\n @type gid: L{int}\n\n @param executable: the executable to run in a new process.\n @type executable: L{str}\n\n @param args: arguments used to create the new process.\n @type args: L{list}.\n\n @param environment: environment used for the new process.\n @type environment: L{dict}.\n\n @param kwargs: keyword arguments to L{_setupChild} method.\n \"\"\"\n\n if self._trySpawnInsteadOfFork(\n path, uid, gid, executable, args, environment, kwargs\n ):\n return\n\n collectorEnabled = gc.isenabled()\n gc.disable()\n try:\n self.pid = os.fork()\n except BaseException:\n # Still in the parent process\n if collectorEnabled:\n gc.enable()\n raise\n else:\n if self.pid == 0:\n # A return value of 0 from fork() indicates that we are now\n # executing in the child process.\n\n # Do not put *ANY* code outside the try block. The child\n # process must either exec or _exit. If it gets outside this\n # block (due to an exception that is not handled here, but\n # which might be handled higher up), there will be two copies\n # of the parent running in parallel, doing all kinds of damage.\n\n # After each change to this code, review it to make sure there\n # are no exit paths.\n\n try:\n # Stop debugging. If I am, I don't care anymore.\n sys.settrace(None)\n self._setupChild(**kwargs)\n self._execChild(path, uid, gid, executable, args, environment)\n except BaseException:\n # If there are errors, try to write something descriptive\n # to stderr before exiting.\n\n # The parent's stderr isn't *necessarily* fd 2 anymore, or\n # even still available; however, even libc assumes that\n # write(2, err) is a useful thing to attempt.\n\n try:\n # On Python 3, print_exc takes a text stream, but\n # on Python 2 it still takes a byte stream. 
So on\n # Python 3 we will wrap up the byte stream returned\n # by os.fdopen using TextIOWrapper.\n\n # We hard-code UTF-8 as the encoding here, rather\n # than looking at something like\n # getfilesystemencoding() or sys.stderr.encoding,\n # because we want an encoding that will be able to\n # encode the full range of code points. We are\n # (most likely) talking to the parent process on\n # the other end of this pipe and not the filesystem\n # or the original sys.stderr, so there's no point\n # in trying to match the encoding of one of those\n # objects.\n\n stderr = io.TextIOWrapper(os.fdopen(2, \"wb\"), encoding=\"utf-8\")\n msg = (\"Upon execvpe {} {} in environment id {}\" \"\\n:\").format(\n executable, str(args), id(environment)\n )\n stderr.write(msg)\n traceback.print_exc(file=stderr)\n stderr.flush()\n\n for fd in range(3):\n os.close(fd)\n except BaseException:\n # Handle all errors during the error-reporting process\n # silently to ensure that the child terminates.\n pass\n\n # See comment above about making sure that we reach this line\n # of code.\n os._exit(1)\n\n # we are now in parent process\n if collectorEnabled:\n gc.enable()\n self.status = -1 # this records the exit status of the child\n\n def _setupChild(self, *args, **kwargs):\n \"\"\"\n Setup the child process. Override in subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def _execChild(self, path, uid, gid, executable, args, environment):\n \"\"\"\n The exec() which is done in the forked child.\n \"\"\"\n if path:\n os.chdir(path)\n if uid is not None or gid is not None:\n if uid is None:\n uid = os.geteuid()\n if gid is None:\n gid = os.getegid()\n # set the UID before I actually exec the process\n os.setuid(0)\n os.setgid(0)\n switchUID(uid, gid)\n os.execvpe(executable, args, environment)\n\n def __repr__(self) -> str:\n \"\"\"\n String representation of a process.\n \"\"\"\n return \"<{} pid={} status={}>\".format(\n self.__class__.__name__,\n self.pid,\n self.status,\n )\n\n\nclass _FDDetector:\n \"\"\"\n This class contains the logic necessary to decide which of the available\n system techniques should be used to detect the open file descriptors for\n the current process. The chosen technique gets monkey-patched into the\n _listOpenFDs method of this class so that the detection only needs to occur\n once.\n\n @ivar listdir: The implementation of listdir to use. This gets overwritten\n by the test cases.\n @ivar getpid: The implementation of getpid to use, returns the PID of the\n running process.\n @ivar openfile: The implementation of open() to use, by default the Python\n builtin.\n \"\"\"\n\n # So that we can unit test this\n listdir = os.listdir\n getpid = os.getpid\n openfile = open\n\n def __init__(self):\n self._implementations = [\n self._procFDImplementation,\n self._devFDImplementation,\n self._fallbackFDImplementation,\n ]\n\n def _listOpenFDs(self):\n \"\"\"\n Return an iterable of file descriptors which I{may} be open in this\n process.\n\n This will try to return the fewest possible descriptors without missing\n any.\n \"\"\"\n self._listOpenFDs = self._getImplementation()\n return self._listOpenFDs()\n\n def _getImplementation(self):\n \"\"\"\n Pick a method which gives correct results for C{_listOpenFDs} in this\n runtime environment.\n\n This involves a lot of very platform-specific checks, some of which may\n be relatively expensive. 
Therefore the returned method should be saved\n and re-used, rather than always calling this method to determine what it\n is.\n\n See the implementation for the details of how a method is selected.\n \"\"\"\n for impl in self._implementations:\n try:\n before = impl()\n except BaseException:\n continue\n with self.openfile(\"/dev/null\", \"r\"):\n after = impl()\n if before != after:\n return impl\n # If no implementation can detect the newly opened file above, then just\n # return the last one. The last one should therefore always be one\n # which makes a simple static guess which includes all possible open\n # file descriptors, but perhaps also many other values which do not\n # correspond to file descriptors. For example, the scheme implemented\n # by _fallbackFDImplementation is suitable to be the last entry.\n return impl\n\n def _devFDImplementation(self):\n \"\"\"\n Simple implementation for systems where /dev/fd actually works.\n See: http://www.freebsd.org/cgi/man.cgi?fdescfs\n \"\"\"\n dname = \"/dev/fd\"\n result = [int(fd) for fd in self.listdir(dname)]\n return result\n\n def _procFDImplementation(self):\n \"\"\"\n Simple implementation for systems where /proc/pid/fd exists (we assume\n it works).\n \"\"\"\n dname = \"/proc/%d/fd\" % (self.getpid(),)\n return [int(fd) for fd in self.listdir(dname)]\n\n def _fallbackFDImplementation(self):\n \"\"\"\n Fallback implementation where either the resource module can inform us\n about the upper bound of how many FDs to expect, or where we just guess\n a constant maximum if there is no resource module.\n\n All possible file descriptors from 0 to that upper bound are returned\n with no attempt to exclude invalid file descriptor values.\n \"\"\"\n try:\n import resource\n except ImportError:\n maxfds = 1024\n else:\n # OS-X reports 9223372036854775808. 
That's a lot of fds to close.\n # OS-X should get the /dev/fd implementation instead, so mostly\n # this check probably isn't necessary.\n maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])\n return range(maxfds)\n\n\ndetector = _FDDetector()\n\n\ndef _listOpenFDs():\n \"\"\"\n Use the global detector object to figure out which FD implementation to\n use.\n \"\"\"\n return detector._listOpenFDs()\n\n\ndef _getFileActions(\n fdState: List[Tuple[int, bool]],\n childToParentFD: Dict[int, int],\n doClose: int,\n doDup2: int,\n) -> List[Tuple[int, ...]]:\n \"\"\"\n Get the C{file_actions} parameter for C{posix_spawn} based on the\n parameters describing the current process state.\n\n @param fdState: A list of 2-tuples of (file descriptor, close-on-exec\n flag).\n\n @param doClose: the integer to use for the 'close' instruction\n\n @param doDup2: the integer to use for the 'dup2' instruction\n \"\"\"\n fdStateDict = dict(fdState)\n parentToChildren: Dict[int, List[int]] = defaultdict(list)\n for inChild, inParent in childToParentFD.items():\n parentToChildren[inParent].append(inChild)\n allocated = set(fdStateDict)\n allocated |= set(childToParentFD.values())\n allocated |= set(childToParentFD.keys())\n nextFD = 0\n\n def allocateFD() -> int:\n nonlocal nextFD\n while nextFD in allocated:\n nextFD += 1\n allocated.add(nextFD)\n return nextFD\n\n result: List[Tuple[int, ...]] = []\n relocations = {}\n for inChild, inParent in sorted(childToParentFD.items()):\n # The parent FD will later be reused by a child FD.\n parentToChildren[inParent].remove(inChild)\n if parentToChildren[inChild]:\n new = relocations[inChild] = allocateFD()\n result.append((doDup2, inChild, new))\n if inParent in relocations:\n result.append((doDup2, relocations[inParent], inChild))\n if not parentToChildren[inParent]:\n result.append((doClose, relocations[inParent]))\n else:\n if inParent == inChild:\n if fdStateDict[inParent]:\n # If the child is attempting to inherit the parent as-is,\n # and it is not close-on-exec, the job is already done; we\n # can bail. Otherwise...\n\n tempFD = allocateFD()\n # The child wants to inherit the parent as-is, so the\n # handle must be heritable.. dup2 makes the new descriptor\n # inheritable by default, *but*, per the man page, “if\n # fildes and fildes2 are equal, then dup2() just returns\n # fildes2; no other changes are made to the existing\n # descriptor”, so we need to dup it somewhere else and dup\n # it back before closing the temporary place we put it.\n result.extend(\n [\n (doDup2, inParent, tempFD),\n (doDup2, tempFD, inChild),\n (doClose, tempFD),\n ]\n )\n else:\n result.append((doDup2, inParent, inChild))\n\n for eachFD, uninheritable in fdStateDict.items():\n if eachFD not in childToParentFD and not uninheritable:\n result.append((doClose, eachFD))\n\n return result\n\n\n@implementer(IProcessTransport)\nclass Process(_BaseProcess):\n \"\"\"\n An operating-system Process.\n\n This represents an operating-system process with arbitrary input/output\n pipes connected to it. Those pipes may represent standard input, standard\n output, and standard error, or any other file descriptor.\n\n On UNIX, this is implemented using posix_spawnp() when possible (or fork(),\n exec(), pipe() and fcntl() when not). These calls may not exist elsewhere\n so this code is not cross-platform. 
(also, windows can only select on\n sockets...)\n \"\"\"\n\n debug = False\n debug_child = False\n\n status = -1\n pid = None\n\n processWriterFactory = ProcessWriter\n processReaderFactory = ProcessReader\n\n def __init__(\n self,\n reactor,\n executable,\n args,\n environment,\n path,\n proto,\n uid=None,\n gid=None,\n childFDs=None,\n ):\n \"\"\"\n Spawn an operating-system process.\n\n This is where the hard work of disconnecting all currently open\n files / forking / executing the new process happens. (This is\n executed automatically when a Process is instantiated.)\n\n This will also run the subprocess as a given user ID and group ID, if\n specified. (Implementation Note: this doesn't support all the arcane\n nuances of setXXuid on UNIX: it will assume that either your effective\n or real UID is 0.)\n \"\"\"\n self._reactor = reactor\n if not proto:\n assert \"r\" not in childFDs.values()\n assert \"w\" not in childFDs.values()\n _BaseProcess.__init__(self, proto)\n\n self.pipes = {}\n # keys are childFDs, we can sense them closing\n # values are ProcessReader/ProcessWriters\n\n helpers = {}\n # keys are childFDs\n # values are parentFDs\n\n if childFDs is None:\n childFDs = {\n 0: \"w\", # we write to the child's stdin\n 1: \"r\", # we read from their stdout\n 2: \"r\", # and we read from their stderr\n }\n\n debug = self.debug\n if debug:\n print(\"childFDs\", childFDs)\n\n _openedPipes = []\n\n def pipe():\n r, w = os.pipe()\n _openedPipes.extend([r, w])\n return r, w\n\n # fdmap.keys() are filenos of pipes that are used by the child.\n fdmap = {} # maps childFD to parentFD\n try:\n for childFD, target in childFDs.items():\n if debug:\n print(\"[%d]\" % childFD, target)\n if target == \"r\":\n # we need a pipe that the parent can read from\n readFD, writeFD = pipe()\n if debug:\n print(\"readFD=%d, writeFD=%d\" % (readFD, writeFD))\n fdmap[childFD] = writeFD # child writes to this\n helpers[childFD] = readFD # parent reads from this\n elif target == \"w\":\n # we need a pipe that the parent can write to\n readFD, writeFD = pipe()\n if debug:\n print(\"readFD=%d, writeFD=%d\" % (readFD, writeFD))\n fdmap[childFD] = readFD # child reads from this\n helpers[childFD] = writeFD # parent writes to this\n else:\n assert type(target) == int, f\"{target!r} should be an int\"\n fdmap[childFD] = target # parent ignores this\n if debug:\n print(\"fdmap\", fdmap)\n if debug:\n print(\"helpers\", helpers)\n # the child only cares about fdmap.values()\n\n self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)\n except BaseException:\n for pipe in _openedPipes:\n os.close(pipe)\n raise\n\n # we are the parent process:\n self.proto = proto\n\n # arrange for the parent-side pipes to be read and written\n for childFD, parentFD in helpers.items():\n os.close(fdmap[childFD])\n if childFDs[childFD] == \"r\":\n reader = self.processReaderFactory(reactor, self, childFD, parentFD)\n self.pipes[childFD] = reader\n\n if childFDs[childFD] == \"w\":\n writer = self.processWriterFactory(\n reactor, self, childFD, parentFD, forceReadHack=True\n )\n self.pipes[childFD] = writer\n\n try:\n # the 'transport' is used for some compatibility methods\n if self.proto is not None:\n self.proto.makeConnection(self)\n except BaseException:\n log.err()\n\n # The reactor might not be running yet. This might call back into\n # processEnded synchronously, triggering an application-visible\n # callback. That's probably not ideal. 
The replacement API for\n # spawnProcess should improve upon this situation.\n registerReapProcessHandler(self.pid, self)\n\n def _trySpawnInsteadOfFork(\n self, path, uid, gid, executable, args, environment, kwargs\n ):\n \"\"\"\n Try to use posix_spawnp() instead of fork(), if possible.\n\n @return: a boolean indicating whether posix_spawnp() was used or not.\n \"\"\"\n if (\n # no support for setuid/setgid anywhere but in QNX's\n # posix_spawnattr_setcred\n (uid is not None)\n or (gid is not None)\n or ((path is not None) and (os.path.abspath(path) != os.path.abspath(\".\")))\n or getattr(self._reactor, \"_neverUseSpawn\", False)\n ):\n return False\n fdmap = kwargs.get(\"fdmap\")\n fdState = []\n for eachFD in _listOpenFDs():\n try:\n isCloseOnExec = fcntl.fcntl(eachFD, fcntl.F_GETFD, fcntl.FD_CLOEXEC)\n except OSError:\n pass\n else:\n fdState.append((eachFD, isCloseOnExec))\n if environment is None:\n environment = {}\n\n setSigDef = [\n everySignal\n for everySignal in range(1, signal.NSIG)\n if signal.getsignal(everySignal) == signal.SIG_IGN\n ]\n\n self.pid = os.posix_spawnp(\n executable,\n args,\n environment,\n file_actions=_getFileActions(\n fdState, fdmap, doClose=_PS_CLOSE, doDup2=_PS_DUP2\n ),\n setsigdef=setSigDef,\n )\n self.status = -1\n return True\n\n if getattr(os, \"posix_spawnp\", None) is None:\n # If there's no posix_spawn implemented, let the superclass handle it\n del _trySpawnInsteadOfFork\n\n def _setupChild(self, fdmap):\n \"\"\"\n fdmap[childFD] = parentFD\n\n The child wants to end up with 'childFD' attached to what used to be\n the parent's parentFD. As an example, a bash command run like\n 'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.\n 'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.\n\n This is accomplished in two steps::\n\n 1. close all file descriptors that aren't values of fdmap. This\n means 0 .. maxfds (or just the open fds within that range, if\n the platform supports '/proc/<pid>/fd').\n\n 2. for each childFD::\n\n - if fdmap[childFD] == childFD, the descriptor is already in\n place. Make sure the CLOEXEC flag is not set, then delete\n the entry from fdmap.\n\n - if childFD is in fdmap.values(), then the target descriptor\n is busy. Use os.dup() to move it elsewhere, update all\n fdmap[childFD] items that point to it, then close the\n original. Then fall through to the next case.\n\n - now fdmap[childFD] is not in fdmap.values(), and is free.\n Use os.dup2() to move it to the right place, then close the\n original.\n \"\"\"\n debug = self.debug_child\n if debug:\n errfd = sys.stderr\n errfd.write(\"starting _setupChild\\n\")\n\n destList = fdmap.values()\n for fd in _listOpenFDs():\n if fd in destList:\n continue\n if debug and fd == errfd.fileno():\n continue\n try:\n os.close(fd)\n except BaseException:\n pass\n\n # at this point, the only fds still open are the ones that need to\n # be moved to their appropriate positions in the child (the targets\n # of fdmap, i.e. fdmap.values() )\n\n if debug:\n print(\"fdmap\", fdmap, file=errfd)\n for child in sorted(fdmap.keys()):\n target = fdmap[child]\n if target == child:\n # fd is already in place\n if debug:\n print(\"%d already in place\" % target, file=errfd)\n fdesc._unsetCloseOnExec(child)\n else:\n if child in fdmap.values():\n # we can't replace child-fd yet, as some other mapping\n # still needs the fd it wants to target. 
We must preserve\n # that old fd by duping it to a new home.\n newtarget = os.dup(child) # give it a safe home\n if debug:\n print(\"os.dup(%d) -> %d\" % (child, newtarget), file=errfd)\n os.close(child) # close the original\n for c, p in list(fdmap.items()):\n if p == child:\n fdmap[c] = newtarget # update all pointers\n # now it should be available\n if debug:\n print(\"os.dup2(%d,%d)\" % (target, child), file=errfd)\n os.dup2(target, child)\n\n # At this point, the child has everything it needs. We want to close\n # everything that isn't going to be used by the child, i.e.\n # everything not in fdmap.keys(). The only remaining fds open are\n # those in fdmap.values().\n\n # Any given fd may appear in fdmap.values() multiple times, so we\n # need to remove duplicates first.\n\n old = []\n for fd in fdmap.values():\n if fd not in old:\n if fd not in fdmap.keys():\n old.append(fd)\n if debug:\n print(\"old\", old, file=errfd)\n for fd in old:\n os.close(fd)\n\n self._resetSignalDisposition()\n\n def writeToChild(self, childFD, data):\n self.pipes[childFD].write(data)\n\n def closeChildFD(self, childFD):\n # for writer pipes, loseConnection tries to write the remaining data\n # out to the pipe before closing it\n # if childFD is not in the list of pipes, assume that it is already\n # closed\n if childFD in self.pipes:\n self.pipes[childFD].loseConnection()\n\n def pauseProducing(self):\n for p in self.pipes.values():\n if isinstance(p, ProcessReader):\n p.stopReading()\n\n def resumeProducing(self):\n for p in self.pipes.values():\n if isinstance(p, ProcessReader):\n p.startReading()\n\n # compatibility\n def closeStdin(self):\n \"\"\"\n Call this to close standard input on this process.\n \"\"\"\n self.closeChildFD(0)\n\n def closeStdout(self):\n self.closeChildFD(1)\n\n def closeStderr(self):\n self.closeChildFD(2)\n\n def loseConnection(self):\n self.closeStdin()\n self.closeStderr()\n self.closeStdout()\n\n def write(self, data):\n \"\"\"\n Call this to write to standard input on this process.\n\n NOTE: This will silently lose data if there is no standard input.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].write(data)\n\n def registerProducer(self, producer, streaming):\n \"\"\"\n Call this to register producer for standard input.\n\n If there is no standard input producer.stopProducing() will\n be called immediately.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].registerProducer(producer, streaming)\n else:\n producer.stopProducing()\n\n def unregisterProducer(self):\n \"\"\"\n Call this to unregister producer for standard input.\"\"\"\n if 0 in self.pipes:\n self.pipes[0].unregisterProducer()\n\n def writeSequence(self, seq):\n \"\"\"\n Call this to write to standard input on this process.\n\n NOTE: This will silently lose data if there is no standard input.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].writeSequence(seq)\n\n def childDataReceived(self, name, data):\n self.proto.childDataReceived(name, data)\n\n def childConnectionLost(self, childFD, reason):\n # this is called when one of the helpers (ProcessReader or\n # ProcessWriter) notices their pipe has been closed\n os.close(self.pipes[childFD].fileno())\n del self.pipes[childFD]\n try:\n self.proto.childConnectionLost(childFD)\n except BaseException:\n log.err()\n self.maybeCallProcessEnded()\n\n def maybeCallProcessEnded(self):\n # we don't call ProcessProtocol.processEnded until:\n # the child has terminated, AND\n # all writers have indicated an error status, AND\n # all readers have indicated EOF\n # This insures that 
we've gathered all output from the process.\n if self.pipes:\n return\n if not self.lostProcess:\n self.reapProcess()\n return\n _BaseProcess.maybeCallProcessEnded(self)\n\n def getHost(self):\n # ITransport.getHost\n raise NotImplementedError()\n\n def getPeer(self):\n # ITransport.getPeer\n raise NotImplementedError()\n\n\n@implementer(IProcessTransport)\nclass PTYProcess(abstract.FileDescriptor, _BaseProcess):\n \"\"\"\n An operating-system Process that uses PTY support.\n \"\"\"\n\n status = -1\n pid = None\n\n def __init__(\n self,\n reactor,\n executable,\n args,\n environment,\n path,\n proto,\n uid=None,\n gid=None,\n usePTY=None,\n ):\n \"\"\"\n Spawn an operating-system process.\n\n This is where the hard work of disconnecting all currently open\n files / forking / executing the new process happens. (This is\n executed automatically when a Process is instantiated.)\n\n This will also run the subprocess as a given user ID and group ID, if\n specified. (Implementation Note: this doesn't support all the arcane\n nuances of setXXuid on UNIX: it will assume that either your effective\n or real UID is 0.)\n \"\"\"\n if pty is None and not isinstance(usePTY, (tuple, list)):\n # no pty module and we didn't get a pty to use\n raise NotImplementedError(\n \"cannot use PTYProcess on platforms without the pty module.\"\n )\n abstract.FileDescriptor.__init__(self, reactor)\n _BaseProcess.__init__(self, proto)\n\n if isinstance(usePTY, (tuple, list)):\n masterfd, slavefd, _ = usePTY\n else:\n masterfd, slavefd = pty.openpty()\n\n try:\n self._fork(\n path,\n uid,\n gid,\n executable,\n args,\n environment,\n masterfd=masterfd,\n slavefd=slavefd,\n )\n except BaseException:\n if not isinstance(usePTY, (tuple, list)):\n os.close(masterfd)\n os.close(slavefd)\n raise\n\n # we are now in parent process:\n os.close(slavefd)\n fdesc.setNonBlocking(masterfd)\n self.fd = masterfd\n self.startReading()\n self.connected = 1\n self.status = -1\n try:\n self.proto.makeConnection(self)\n except BaseException:\n log.err()\n registerReapProcessHandler(self.pid, self)\n\n def _setupChild(self, masterfd, slavefd):\n \"\"\"\n Set up child process after C{fork()} but before C{exec()}.\n\n This involves:\n\n - closing C{masterfd}, since it is not used in the subprocess\n\n - creating a new session with C{os.setsid}\n\n - changing the controlling terminal of the process (and the new\n session) to point at C{slavefd}\n\n - duplicating C{slavefd} to standard input, output, and error\n\n - closing all other open file descriptors (according to\n L{_listOpenFDs})\n\n - re-setting all signal handlers to C{SIG_DFL}\n\n @param masterfd: The master end of a PTY file descriptors opened with\n C{openpty}.\n @type masterfd: L{int}\n\n @param slavefd: The slave end of a PTY opened with C{openpty}.\n @type slavefd: L{int}\n \"\"\"\n os.close(masterfd)\n os.setsid()\n fcntl.ioctl(slavefd, termios.TIOCSCTTY, \"\")\n\n for fd in range(3):\n if fd != slavefd:\n os.close(fd)\n\n os.dup2(slavefd, 0) # stdin\n os.dup2(slavefd, 1) # stdout\n os.dup2(slavefd, 2) # stderr\n\n for fd in _listOpenFDs():\n if fd > 2:\n try:\n os.close(fd)\n except BaseException:\n pass\n\n self._resetSignalDisposition()\n\n def closeStdin(self):\n # PTYs do not have stdin/stdout/stderr. They only have in and out, just\n # like sockets. 
You cannot close one without closing off the entire PTY\n pass\n\n def closeStdout(self):\n pass\n\n def closeStderr(self):\n pass\n\n def doRead(self):\n \"\"\"\n Called when my standard output stream is ready for reading.\n \"\"\"\n return fdesc.readFromFD(\n self.fd, lambda data: self.proto.childDataReceived(1, data)\n )\n\n def fileno(self):\n \"\"\"\n This returns the file number of standard output on this process.\n \"\"\"\n return self.fd\n\n def maybeCallProcessEnded(self):\n # two things must happen before we call the ProcessProtocol's\n # processEnded method. 1: the child process must die and be reaped\n # (which calls our own processEnded method). 2: the child must close\n # their stdin/stdout/stderr fds, causing the pty to close, causing\n # our connectionLost method to be called. #2 can also be triggered\n # by calling .loseConnection().\n if self.lostProcess == 2:\n _BaseProcess.maybeCallProcessEnded(self)\n\n def connectionLost(self, reason):\n \"\"\"\n I call this to clean up when one or all of my connections has died.\n \"\"\"\n abstract.FileDescriptor.connectionLost(self, reason)\n os.close(self.fd)\n self.lostProcess += 1\n self.maybeCallProcessEnded()\n\n def writeSomeData(self, data):\n \"\"\"\n Write some data to the open process.\n \"\"\"\n return fdesc.writeToFD(self.fd, data)\n\n def closeChildFD(self, descriptor):\n # IProcessTransport\n raise NotImplementedError()\n\n def writeToChild(self, childFD, data):\n # IProcessTransport\n raise NotImplementedError()\n", "path": "src/twisted/internet/process.py" } ]
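As an aside from the file contents above: the `_getFileActions` helper defined in this module is the densest part of the posix_spawn path, and a tiny worked example may help. The snippet below is illustrative only and is not part of the file or the patch; it imports the private helper directly (which works only on POSIX platforms, since `twisted.internet.process` refuses to import on Windows) and substitutes the sentinel integers 0 and 1 for the real `POSIX_SPAWN_CLOSE`/`POSIX_SPAWN_DUP2` codes, which is safe because the helper only threads those values through to its result.

```python
# Illustrative only -- not part of the Twisted sources above. Shows what the
# private _getFileActions helper computes for a typical stdin/stdout/stderr
# mapping. Importing twisted.internet.process works only on POSIX.
from twisted.internet.process import _getFileActions

# Parent currently holds fds 0-2 plus two pipe ends (7 and 8); the second
# element of each tuple is the close-on-exec flag, all inheritable here.
fd_state = [(0, False), (1, False), (2, False), (7, False), (8, False)]

# The child should read stdin from parent fd 7 and send stdout and stderr
# to parent fd 8 (childFD -> parentFD, the same shape as fdmap above).
fd_map = {0: 7, 1: 8, 2: 8}

# Sentinel integers stand in for os.POSIX_SPAWN_CLOSE / os.POSIX_SPAWN_DUP2.
actions = _getFileActions(fd_state, fd_map, doClose=0, doDup2=1)
print(actions)
# Expected: dup2 7->0, dup2 8->1, dup2 8->2, then close the leftover 7 and 8:
# [(1, 7, 0), (1, 8, 1), (1, 8, 2), (0, 7), (0, 8)]
```

The trailing close instructions for fds 7 and 8 come from the helper's final loop, which closes every inheritable parent fd that the child does not explicitly map.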
[ { "content": "# -*- test-case-name: twisted.test.test_process -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUNIX Process management.\n\nDo NOT use this module directly - use reactor.spawnProcess() instead.\n\nMaintainer: Itamar Shtull-Trauring\n\"\"\"\nfrom __future__ import annotations\n\nimport errno\nimport gc\nimport io\nimport os\nimport signal\nimport stat\nimport sys\nimport traceback\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple\n\n_PS_CLOSE: int\n_PS_DUP2: int\n\nif not TYPE_CHECKING:\n try:\n from os import POSIX_SPAWN_CLOSE as _PS_CLOSE, POSIX_SPAWN_DUP2 as _PS_DUP2\n except ImportError:\n pass\n\nfrom zope.interface import implementer\n\nfrom twisted.internet import abstract, error, fdesc\nfrom twisted.internet._baseprocess import BaseProcess\nfrom twisted.internet.interfaces import IProcessTransport\nfrom twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST\nfrom twisted.python import failure, log\nfrom twisted.python.runtime import platform\nfrom twisted.python.util import switchUID\n\nif platform.isWindows():\n raise ImportError(\n \"twisted.internet.process does not work on Windows. \"\n \"Use the reactor.spawnProcess() API instead.\"\n )\n\ntry:\n import pty as _pty\nexcept ImportError:\n pty = None\nelse:\n pty = _pty\n\ntry:\n import fcntl as _fcntl\n import termios\nexcept ImportError:\n fcntl = None\nelse:\n fcntl = _fcntl\n\n# Some people were importing this, which is incorrect, just keeping it\n# here for backwards compatibility:\nProcessExitedAlready = error.ProcessExitedAlready\n\nreapProcessHandlers: Dict[int, _BaseProcess] = {}\n\n\ndef reapAllProcesses() -> None:\n \"\"\"\n Reap all registered processes.\n \"\"\"\n # Coerce this to a list, as reaping the process changes the dictionary and\n # causes a \"size changed during iteration\" exception\n for process in list(reapProcessHandlers.values()):\n process.reapProcess()\n\n\ndef registerReapProcessHandler(pid, process):\n \"\"\"\n Register a process handler for the given pid, in case L{reapAllProcesses}\n is called.\n\n @param pid: the pid of the process.\n @param process: a process handler.\n \"\"\"\n if pid in reapProcessHandlers:\n raise RuntimeError(\"Try to register an already registered process.\")\n try:\n auxPID, status = os.waitpid(pid, os.WNOHANG)\n except BaseException:\n log.msg(f\"Failed to reap {pid}:\")\n log.err()\n\n if pid is None:\n return\n\n auxPID = None\n if auxPID:\n process.processEnded(status)\n else:\n # if auxPID is 0, there are children but none have exited\n reapProcessHandlers[pid] = process\n\n\ndef unregisterReapProcessHandler(pid, process):\n \"\"\"\n Unregister a process handler previously registered with\n L{registerReapProcessHandler}.\n \"\"\"\n if not (pid in reapProcessHandlers and reapProcessHandlers[pid] == process):\n raise RuntimeError(\"Try to unregister a process not registered.\")\n del reapProcessHandlers[pid]\n\n\nclass ProcessWriter(abstract.FileDescriptor):\n \"\"\"\n (Internal) Helper class to write into a Process's input pipe.\n\n I am a helper which describes a selectable asynchronous writer to a\n process's input pipe, including stdin.\n\n @ivar enableReadHack: A flag which determines how readability on this\n write descriptor will be handled. If C{True}, then readability may\n indicate the reader for this write descriptor has been closed (ie,\n the connection has been lost). 
If C{False}, then readability events\n are ignored.\n \"\"\"\n\n connected = 1\n ic = 0\n enableReadHack = False\n\n def __init__(self, reactor, proc, name, fileno, forceReadHack=False):\n \"\"\"\n Initialize, specifying a Process instance to connect to.\n \"\"\"\n abstract.FileDescriptor.__init__(self, reactor)\n fdesc.setNonBlocking(fileno)\n self.proc = proc\n self.name = name\n self.fd = fileno\n\n if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):\n # If the fd is not a pipe, then the read hack is never\n # applicable. This case arises when ProcessWriter is used by\n # StandardIO and stdout is redirected to a normal file.\n self.enableReadHack = False\n elif forceReadHack:\n self.enableReadHack = True\n else:\n # Detect if this fd is actually a write-only fd. If it's\n # valid to read, don't try to detect closing via read.\n # This really only means that we cannot detect a TTY's write\n # pipe being closed.\n try:\n os.read(self.fileno(), 0)\n except OSError:\n # It's a write-only pipe end, enable hack\n self.enableReadHack = True\n\n if self.enableReadHack:\n self.startReading()\n\n def fileno(self):\n \"\"\"\n Return the fileno() of my process's stdin.\n \"\"\"\n return self.fd\n\n def writeSomeData(self, data):\n \"\"\"\n Write some data to the open process.\n \"\"\"\n rv = fdesc.writeToFD(self.fd, data)\n if rv == len(data) and self.enableReadHack:\n # If the send buffer is now empty and it is necessary to monitor\n # this descriptor for readability to detect close, try detecting\n # readability now.\n self.startReading()\n return rv\n\n def write(self, data):\n self.stopReading()\n abstract.FileDescriptor.write(self, data)\n\n def doRead(self):\n \"\"\"\n The only way a write pipe can become \"readable\" is at EOF, because the\n child has closed it, and we're using a reactor which doesn't\n distinguish between readable and closed (such as the select reactor).\n\n Except that's not true on linux < 2.6.11. It has the following\n characteristics: write pipe is completely empty => POLLOUT (writable in\n select), write pipe is not completely empty => POLLIN (readable in\n select), write pipe's reader closed => POLLIN|POLLERR (readable and\n writable in select)\n\n That's what this funky code is for. If linux was not broken, this\n function could be simply \"return CONNECTION_LOST\".\n \"\"\"\n if self.enableReadHack:\n return CONNECTION_LOST\n else:\n self.stopReading()\n\n def connectionLost(self, reason):\n \"\"\"\n See abstract.FileDescriptor.connectionLost.\n \"\"\"\n # At least on macOS 10.4, exiting while stdout is non-blocking can\n # result in data loss. 
For some reason putting the file descriptor\n # back into blocking mode seems to resolve this issue.\n fdesc.setBlocking(self.fd)\n\n abstract.FileDescriptor.connectionLost(self, reason)\n self.proc.childConnectionLost(self.name, reason)\n\n\nclass ProcessReader(abstract.FileDescriptor):\n \"\"\"\n ProcessReader\n\n I am a selectable representation of a process's output pipe, such as\n stdout and stderr.\n \"\"\"\n\n connected = True\n\n def __init__(self, reactor, proc, name, fileno):\n \"\"\"\n Initialize, specifying a process to connect to.\n \"\"\"\n abstract.FileDescriptor.__init__(self, reactor)\n fdesc.setNonBlocking(fileno)\n self.proc = proc\n self.name = name\n self.fd = fileno\n self.startReading()\n\n def fileno(self):\n \"\"\"\n Return the fileno() of my process's stderr.\n \"\"\"\n return self.fd\n\n def writeSomeData(self, data):\n # the only time this is actually called is after .loseConnection Any\n # actual write attempt would fail, so we must avoid that. This hack\n # allows us to use .loseConnection on both readers and writers.\n assert data == b\"\"\n return CONNECTION_LOST\n\n def doRead(self):\n \"\"\"\n This is called when the pipe becomes readable.\n \"\"\"\n return fdesc.readFromFD(self.fd, self.dataReceived)\n\n def dataReceived(self, data):\n self.proc.childDataReceived(self.name, data)\n\n def loseConnection(self):\n if self.connected and not self.disconnecting:\n self.disconnecting = 1\n self.stopReading()\n self.reactor.callLater(\n 0, self.connectionLost, failure.Failure(CONNECTION_DONE)\n )\n\n def connectionLost(self, reason):\n \"\"\"\n Close my end of the pipe, signal the Process (which signals the\n ProcessProtocol).\n \"\"\"\n abstract.FileDescriptor.connectionLost(self, reason)\n self.proc.childConnectionLost(self.name, reason)\n\n\nclass _BaseProcess(BaseProcess):\n \"\"\"\n Base class for Process and PTYProcess.\n \"\"\"\n\n status: Optional[int] = None\n pid = None\n\n def reapProcess(self):\n \"\"\"\n Try to reap a process (without blocking) via waitpid.\n\n This is called when sigchild is caught or a Process object loses its\n \"connection\" (stdout is closed) This ought to result in reaping all\n zombie processes, since it will be called twice as often as it needs\n to be.\n\n (Unfortunately, this is a slightly experimental approach, since\n UNIX has no way to be really sure that your process is going to\n go away w/o blocking. I don't want to block.)\n \"\"\"\n try:\n try:\n pid, status = os.waitpid(self.pid, os.WNOHANG)\n except OSError as e:\n if e.errno == errno.ECHILD:\n # no child process\n pid = None\n else:\n raise\n except BaseException:\n log.msg(f\"Failed to reap {self.pid}:\")\n log.err()\n pid = None\n if pid:\n unregisterReapProcessHandler(pid, self)\n self.processEnded(status)\n\n def _getReason(self, status):\n exitCode = sig = None\n if os.WIFEXITED(status):\n exitCode = os.WEXITSTATUS(status)\n else:\n sig = os.WTERMSIG(status)\n if exitCode or sig:\n return error.ProcessTerminated(exitCode, sig, status)\n return error.ProcessDone(status)\n\n def signalProcess(self, signalID):\n \"\"\"\n Send the given signal C{signalID} to the process. 
It'll translate a\n few signals ('HUP', 'STOP', 'INT', 'KILL', 'TERM') from a string\n representation to its int value, otherwise it'll pass directly the\n value provided\n\n @type signalID: C{str} or C{int}\n \"\"\"\n if signalID in (\"HUP\", \"STOP\", \"INT\", \"KILL\", \"TERM\"):\n signalID = getattr(signal, f\"SIG{signalID}\")\n if self.pid is None:\n raise ProcessExitedAlready()\n try:\n os.kill(self.pid, signalID)\n except OSError as e:\n if e.errno == errno.ESRCH:\n raise ProcessExitedAlready()\n else:\n raise\n\n def _resetSignalDisposition(self):\n # The Python interpreter ignores some signals, and our child\n # process will inherit that behaviour. To have a child process\n # that responds to signals normally, we need to reset our\n # child process's signal handling (just) after we fork and\n # before we execvpe.\n for signalnum in range(1, signal.NSIG):\n if signal.getsignal(signalnum) == signal.SIG_IGN:\n # Reset signal handling to the default\n signal.signal(signalnum, signal.SIG_DFL)\n\n def _trySpawnInsteadOfFork(\n self, path, uid, gid, executable, args, environment, kwargs\n ):\n \"\"\"\n Try to use posix_spawnp() instead of fork(), if possible.\n\n This implementation returns False because the non-PTY subclass\n implements the actual logic; we can't yet use this for pty processes.\n\n @return: a boolean indicating whether posix_spawnp() was used or not.\n \"\"\"\n return False\n\n def _fork(self, path, uid, gid, executable, args, environment, **kwargs):\n \"\"\"\n Fork and then exec sub-process.\n\n @param path: the path where to run the new process.\n @type path: L{bytes} or L{unicode}\n\n @param uid: if defined, the uid used to run the new process.\n @type uid: L{int}\n\n @param gid: if defined, the gid used to run the new process.\n @type gid: L{int}\n\n @param executable: the executable to run in a new process.\n @type executable: L{str}\n\n @param args: arguments used to create the new process.\n @type args: L{list}.\n\n @param environment: environment used for the new process.\n @type environment: L{dict}.\n\n @param kwargs: keyword arguments to L{_setupChild} method.\n \"\"\"\n\n if self._trySpawnInsteadOfFork(\n path, uid, gid, executable, args, environment, kwargs\n ):\n return\n\n collectorEnabled = gc.isenabled()\n gc.disable()\n try:\n self.pid = os.fork()\n except BaseException:\n # Still in the parent process\n if collectorEnabled:\n gc.enable()\n raise\n else:\n if self.pid == 0:\n # A return value of 0 from fork() indicates that we are now\n # executing in the child process.\n\n # Do not put *ANY* code outside the try block. The child\n # process must either exec or _exit. If it gets outside this\n # block (due to an exception that is not handled here, but\n # which might be handled higher up), there will be two copies\n # of the parent running in parallel, doing all kinds of damage.\n\n # After each change to this code, review it to make sure there\n # are no exit paths.\n\n try:\n # Stop debugging. If I am, I don't care anymore.\n sys.settrace(None)\n self._setupChild(**kwargs)\n self._execChild(path, uid, gid, executable, args, environment)\n except BaseException:\n # If there are errors, try to write something descriptive\n # to stderr before exiting.\n\n # The parent's stderr isn't *necessarily* fd 2 anymore, or\n # even still available; however, even libc assumes that\n # write(2, err) is a useful thing to attempt.\n\n try:\n # On Python 3, print_exc takes a text stream, but\n # on Python 2 it still takes a byte stream. 
So on\n # Python 3 we will wrap up the byte stream returned\n # by os.fdopen using TextIOWrapper.\n\n # We hard-code UTF-8 as the encoding here, rather\n # than looking at something like\n # getfilesystemencoding() or sys.stderr.encoding,\n # because we want an encoding that will be able to\n # encode the full range of code points. We are\n # (most likely) talking to the parent process on\n # the other end of this pipe and not the filesystem\n # or the original sys.stderr, so there's no point\n # in trying to match the encoding of one of those\n # objects.\n\n stderr = io.TextIOWrapper(os.fdopen(2, \"wb\"), encoding=\"utf-8\")\n msg = (\"Upon execvpe {} {} in environment id {}\" \"\\n:\").format(\n executable, str(args), id(environment)\n )\n stderr.write(msg)\n traceback.print_exc(file=stderr)\n stderr.flush()\n\n for fd in range(3):\n os.close(fd)\n except BaseException:\n # Handle all errors during the error-reporting process\n # silently to ensure that the child terminates.\n pass\n\n # See comment above about making sure that we reach this line\n # of code.\n os._exit(1)\n\n # we are now in parent process\n if collectorEnabled:\n gc.enable()\n self.status = -1 # this records the exit status of the child\n\n def _setupChild(self, *args, **kwargs):\n \"\"\"\n Setup the child process. Override in subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def _execChild(self, path, uid, gid, executable, args, environment):\n \"\"\"\n The exec() which is done in the forked child.\n \"\"\"\n if path:\n os.chdir(path)\n if uid is not None or gid is not None:\n if uid is None:\n uid = os.geteuid()\n if gid is None:\n gid = os.getegid()\n # set the UID before I actually exec the process\n os.setuid(0)\n os.setgid(0)\n switchUID(uid, gid)\n os.execvpe(executable, args, environment)\n\n def __repr__(self) -> str:\n \"\"\"\n String representation of a process.\n \"\"\"\n return \"<{} pid={} status={}>\".format(\n self.__class__.__name__,\n self.pid,\n self.status,\n )\n\n\nclass _FDDetector:\n \"\"\"\n This class contains the logic necessary to decide which of the available\n system techniques should be used to detect the open file descriptors for\n the current process. The chosen technique gets monkey-patched into the\n _listOpenFDs method of this class so that the detection only needs to occur\n once.\n\n @ivar listdir: The implementation of listdir to use. This gets overwritten\n by the test cases.\n @ivar getpid: The implementation of getpid to use, returns the PID of the\n running process.\n @ivar openfile: The implementation of open() to use, by default the Python\n builtin.\n \"\"\"\n\n # So that we can unit test this\n listdir = os.listdir\n getpid = os.getpid\n openfile = open\n\n def __init__(self):\n self._implementations = [\n self._procFDImplementation,\n self._devFDImplementation,\n self._fallbackFDImplementation,\n ]\n\n def _listOpenFDs(self):\n \"\"\"\n Return an iterable of file descriptors which I{may} be open in this\n process.\n\n This will try to return the fewest possible descriptors without missing\n any.\n \"\"\"\n self._listOpenFDs = self._getImplementation()\n return self._listOpenFDs()\n\n def _getImplementation(self):\n \"\"\"\n Pick a method which gives correct results for C{_listOpenFDs} in this\n runtime environment.\n\n This involves a lot of very platform-specific checks, some of which may\n be relatively expensive. 
Therefore the returned method should be saved\n and re-used, rather than always calling this method to determine what it\n is.\n\n See the implementation for the details of how a method is selected.\n \"\"\"\n for impl in self._implementations:\n try:\n before = impl()\n except BaseException:\n continue\n with self.openfile(\"/dev/null\", \"r\"):\n after = impl()\n if before != after:\n return impl\n # If no implementation can detect the newly opened file above, then just\n # return the last one. The last one should therefore always be one\n # which makes a simple static guess which includes all possible open\n # file descriptors, but perhaps also many other values which do not\n # correspond to file descriptors. For example, the scheme implemented\n # by _fallbackFDImplementation is suitable to be the last entry.\n return impl\n\n def _devFDImplementation(self):\n \"\"\"\n Simple implementation for systems where /dev/fd actually works.\n See: http://www.freebsd.org/cgi/man.cgi?fdescfs\n \"\"\"\n dname = \"/dev/fd\"\n result = [int(fd) for fd in self.listdir(dname)]\n return result\n\n def _procFDImplementation(self):\n \"\"\"\n Simple implementation for systems where /proc/pid/fd exists (we assume\n it works).\n \"\"\"\n dname = \"/proc/%d/fd\" % (self.getpid(),)\n return [int(fd) for fd in self.listdir(dname)]\n\n def _fallbackFDImplementation(self):\n \"\"\"\n Fallback implementation where either the resource module can inform us\n about the upper bound of how many FDs to expect, or where we just guess\n a constant maximum if there is no resource module.\n\n All possible file descriptors from 0 to that upper bound are returned\n with no attempt to exclude invalid file descriptor values.\n \"\"\"\n try:\n import resource\n except ImportError:\n maxfds = 1024\n else:\n # OS-X reports 9223372036854775808. 
That's a lot of fds to close.\n # OS-X should get the /dev/fd implementation instead, so mostly\n # this check probably isn't necessary.\n maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])\n return range(maxfds)\n\n\ndetector = _FDDetector()\n\n\ndef _listOpenFDs():\n \"\"\"\n Use the global detector object to figure out which FD implementation to\n use.\n \"\"\"\n return detector._listOpenFDs()\n\n\ndef _getFileActions(\n fdState: List[Tuple[int, bool]],\n childToParentFD: Dict[int, int],\n doClose: int,\n doDup2: int,\n) -> List[Tuple[int, ...]]:\n \"\"\"\n Get the C{file_actions} parameter for C{posix_spawn} based on the\n parameters describing the current process state.\n\n @param fdState: A list of 2-tuples of (file descriptor, close-on-exec\n flag).\n\n @param doClose: the integer to use for the 'close' instruction\n\n @param doDup2: the integer to use for the 'dup2' instruction\n \"\"\"\n fdStateDict = dict(fdState)\n parentToChildren: Dict[int, List[int]] = defaultdict(list)\n for inChild, inParent in childToParentFD.items():\n parentToChildren[inParent].append(inChild)\n allocated = set(fdStateDict)\n allocated |= set(childToParentFD.values())\n allocated |= set(childToParentFD.keys())\n nextFD = 0\n\n def allocateFD() -> int:\n nonlocal nextFD\n while nextFD in allocated:\n nextFD += 1\n allocated.add(nextFD)\n return nextFD\n\n result: List[Tuple[int, ...]] = []\n relocations = {}\n for inChild, inParent in sorted(childToParentFD.items()):\n # The parent FD will later be reused by a child FD.\n parentToChildren[inParent].remove(inChild)\n if parentToChildren[inChild]:\n new = relocations[inChild] = allocateFD()\n result.append((doDup2, inChild, new))\n if inParent in relocations:\n result.append((doDup2, relocations[inParent], inChild))\n if not parentToChildren[inParent]:\n result.append((doClose, relocations[inParent]))\n else:\n if inParent == inChild:\n if fdStateDict[inParent]:\n # If the child is attempting to inherit the parent as-is,\n # and it is not close-on-exec, the job is already done; we\n # can bail. Otherwise...\n\n tempFD = allocateFD()\n # The child wants to inherit the parent as-is, so the\n # handle must be heritable.. dup2 makes the new descriptor\n # inheritable by default, *but*, per the man page, “if\n # fildes and fildes2 are equal, then dup2() just returns\n # fildes2; no other changes are made to the existing\n # descriptor”, so we need to dup it somewhere else and dup\n # it back before closing the temporary place we put it.\n result.extend(\n [\n (doDup2, inParent, tempFD),\n (doDup2, tempFD, inChild),\n (doClose, tempFD),\n ]\n )\n else:\n result.append((doDup2, inParent, inChild))\n\n for eachFD, uninheritable in fdStateDict.items():\n if eachFD not in childToParentFD and not uninheritable:\n result.append((doClose, eachFD))\n\n return result\n\n\n@implementer(IProcessTransport)\nclass Process(_BaseProcess):\n \"\"\"\n An operating-system Process.\n\n This represents an operating-system process with arbitrary input/output\n pipes connected to it. Those pipes may represent standard input, standard\n output, and standard error, or any other file descriptor.\n\n On UNIX, this is implemented using posix_spawnp() when possible (or fork(),\n exec(), pipe() and fcntl() when not). These calls may not exist elsewhere\n so this code is not cross-platform. 
(also, windows can only select on\n sockets...)\n \"\"\"\n\n debug = False\n debug_child = False\n\n status = -1\n pid = None\n\n processWriterFactory = ProcessWriter\n processReaderFactory = ProcessReader\n\n def __init__(\n self,\n reactor,\n executable,\n args,\n environment,\n path,\n proto,\n uid=None,\n gid=None,\n childFDs=None,\n ):\n \"\"\"\n Spawn an operating-system process.\n\n This is where the hard work of disconnecting all currently open\n files / forking / executing the new process happens. (This is\n executed automatically when a Process is instantiated.)\n\n This will also run the subprocess as a given user ID and group ID, if\n specified. (Implementation Note: this doesn't support all the arcane\n nuances of setXXuid on UNIX: it will assume that either your effective\n or real UID is 0.)\n \"\"\"\n self._reactor = reactor\n if not proto:\n assert \"r\" not in childFDs.values()\n assert \"w\" not in childFDs.values()\n _BaseProcess.__init__(self, proto)\n\n self.pipes = {}\n # keys are childFDs, we can sense them closing\n # values are ProcessReader/ProcessWriters\n\n helpers = {}\n # keys are childFDs\n # values are parentFDs\n\n if childFDs is None:\n childFDs = {\n 0: \"w\", # we write to the child's stdin\n 1: \"r\", # we read from their stdout\n 2: \"r\", # and we read from their stderr\n }\n\n debug = self.debug\n if debug:\n print(\"childFDs\", childFDs)\n\n _openedPipes = []\n\n def pipe():\n r, w = os.pipe()\n _openedPipes.extend([r, w])\n return r, w\n\n # fdmap.keys() are filenos of pipes that are used by the child.\n fdmap = {} # maps childFD to parentFD\n try:\n for childFD, target in childFDs.items():\n if debug:\n print(\"[%d]\" % childFD, target)\n if target == \"r\":\n # we need a pipe that the parent can read from\n readFD, writeFD = pipe()\n if debug:\n print(\"readFD=%d, writeFD=%d\" % (readFD, writeFD))\n fdmap[childFD] = writeFD # child writes to this\n helpers[childFD] = readFD # parent reads from this\n elif target == \"w\":\n # we need a pipe that the parent can write to\n readFD, writeFD = pipe()\n if debug:\n print(\"readFD=%d, writeFD=%d\" % (readFD, writeFD))\n fdmap[childFD] = readFD # child reads from this\n helpers[childFD] = writeFD # parent writes to this\n else:\n assert type(target) == int, f\"{target!r} should be an int\"\n fdmap[childFD] = target # parent ignores this\n if debug:\n print(\"fdmap\", fdmap)\n if debug:\n print(\"helpers\", helpers)\n # the child only cares about fdmap.values()\n\n self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)\n except BaseException:\n for pipe in _openedPipes:\n os.close(pipe)\n raise\n\n # we are the parent process:\n self.proto = proto\n\n # arrange for the parent-side pipes to be read and written\n for childFD, parentFD in helpers.items():\n os.close(fdmap[childFD])\n if childFDs[childFD] == \"r\":\n reader = self.processReaderFactory(reactor, self, childFD, parentFD)\n self.pipes[childFD] = reader\n\n if childFDs[childFD] == \"w\":\n writer = self.processWriterFactory(\n reactor, self, childFD, parentFD, forceReadHack=True\n )\n self.pipes[childFD] = writer\n\n try:\n # the 'transport' is used for some compatibility methods\n if self.proto is not None:\n self.proto.makeConnection(self)\n except BaseException:\n log.err()\n\n # The reactor might not be running yet. This might call back into\n # processEnded synchronously, triggering an application-visible\n # callback. That's probably not ideal. 
The replacement API for\n # spawnProcess should improve upon this situation.\n registerReapProcessHandler(self.pid, self)\n\n def _trySpawnInsteadOfFork(\n self, path, uid, gid, executable, args, environment, kwargs\n ):\n \"\"\"\n Try to use posix_spawnp() instead of fork(), if possible.\n\n @return: a boolean indicating whether posix_spawnp() was used or not.\n \"\"\"\n if (\n # no support for setuid/setgid anywhere but in QNX's\n # posix_spawnattr_setcred\n (uid is not None)\n or (gid is not None)\n or ((path is not None) and (os.path.abspath(path) != os.path.abspath(\".\")))\n or getattr(self._reactor, \"_neverUseSpawn\", False)\n ):\n return False\n fdmap = kwargs.get(\"fdmap\")\n fdState = []\n for eachFD in _listOpenFDs():\n try:\n isCloseOnExec = fcntl.fcntl(eachFD, fcntl.F_GETFD, fcntl.FD_CLOEXEC)\n except OSError:\n pass\n else:\n fdState.append((eachFD, isCloseOnExec))\n if environment is None:\n environment = os.environ\n\n setSigDef = [\n everySignal\n for everySignal in range(1, signal.NSIG)\n if signal.getsignal(everySignal) == signal.SIG_IGN\n ]\n\n self.pid = os.posix_spawnp(\n executable,\n args,\n environment,\n file_actions=_getFileActions(\n fdState, fdmap, doClose=_PS_CLOSE, doDup2=_PS_DUP2\n ),\n setsigdef=setSigDef,\n )\n self.status = -1\n return True\n\n if getattr(os, \"posix_spawnp\", None) is None:\n # If there's no posix_spawn implemented, let the superclass handle it\n del _trySpawnInsteadOfFork\n\n def _setupChild(self, fdmap):\n \"\"\"\n fdmap[childFD] = parentFD\n\n The child wants to end up with 'childFD' attached to what used to be\n the parent's parentFD. As an example, a bash command run like\n 'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.\n 'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.\n\n This is accomplished in two steps::\n\n 1. close all file descriptors that aren't values of fdmap. This\n means 0 .. maxfds (or just the open fds within that range, if\n the platform supports '/proc/<pid>/fd').\n\n 2. for each childFD::\n\n - if fdmap[childFD] == childFD, the descriptor is already in\n place. Make sure the CLOEXEC flag is not set, then delete\n the entry from fdmap.\n\n - if childFD is in fdmap.values(), then the target descriptor\n is busy. Use os.dup() to move it elsewhere, update all\n fdmap[childFD] items that point to it, then close the\n original. Then fall through to the next case.\n\n - now fdmap[childFD] is not in fdmap.values(), and is free.\n Use os.dup2() to move it to the right place, then close the\n original.\n \"\"\"\n debug = self.debug_child\n if debug:\n errfd = sys.stderr\n errfd.write(\"starting _setupChild\\n\")\n\n destList = fdmap.values()\n for fd in _listOpenFDs():\n if fd in destList:\n continue\n if debug and fd == errfd.fileno():\n continue\n try:\n os.close(fd)\n except BaseException:\n pass\n\n # at this point, the only fds still open are the ones that need to\n # be moved to their appropriate positions in the child (the targets\n # of fdmap, i.e. fdmap.values() )\n\n if debug:\n print(\"fdmap\", fdmap, file=errfd)\n for child in sorted(fdmap.keys()):\n target = fdmap[child]\n if target == child:\n # fd is already in place\n if debug:\n print(\"%d already in place\" % target, file=errfd)\n fdesc._unsetCloseOnExec(child)\n else:\n if child in fdmap.values():\n # we can't replace child-fd yet, as some other mapping\n # still needs the fd it wants to target. 
We must preserve\n # that old fd by duping it to a new home.\n newtarget = os.dup(child) # give it a safe home\n if debug:\n print(\"os.dup(%d) -> %d\" % (child, newtarget), file=errfd)\n os.close(child) # close the original\n for c, p in list(fdmap.items()):\n if p == child:\n fdmap[c] = newtarget # update all pointers\n # now it should be available\n if debug:\n print(\"os.dup2(%d,%d)\" % (target, child), file=errfd)\n os.dup2(target, child)\n\n # At this point, the child has everything it needs. We want to close\n # everything that isn't going to be used by the child, i.e.\n # everything not in fdmap.keys(). The only remaining fds open are\n # those in fdmap.values().\n\n # Any given fd may appear in fdmap.values() multiple times, so we\n # need to remove duplicates first.\n\n old = []\n for fd in fdmap.values():\n if fd not in old:\n if fd not in fdmap.keys():\n old.append(fd)\n if debug:\n print(\"old\", old, file=errfd)\n for fd in old:\n os.close(fd)\n\n self._resetSignalDisposition()\n\n def writeToChild(self, childFD, data):\n self.pipes[childFD].write(data)\n\n def closeChildFD(self, childFD):\n # for writer pipes, loseConnection tries to write the remaining data\n # out to the pipe before closing it\n # if childFD is not in the list of pipes, assume that it is already\n # closed\n if childFD in self.pipes:\n self.pipes[childFD].loseConnection()\n\n def pauseProducing(self):\n for p in self.pipes.values():\n if isinstance(p, ProcessReader):\n p.stopReading()\n\n def resumeProducing(self):\n for p in self.pipes.values():\n if isinstance(p, ProcessReader):\n p.startReading()\n\n # compatibility\n def closeStdin(self):\n \"\"\"\n Call this to close standard input on this process.\n \"\"\"\n self.closeChildFD(0)\n\n def closeStdout(self):\n self.closeChildFD(1)\n\n def closeStderr(self):\n self.closeChildFD(2)\n\n def loseConnection(self):\n self.closeStdin()\n self.closeStderr()\n self.closeStdout()\n\n def write(self, data):\n \"\"\"\n Call this to write to standard input on this process.\n\n NOTE: This will silently lose data if there is no standard input.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].write(data)\n\n def registerProducer(self, producer, streaming):\n \"\"\"\n Call this to register producer for standard input.\n\n If there is no standard input producer.stopProducing() will\n be called immediately.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].registerProducer(producer, streaming)\n else:\n producer.stopProducing()\n\n def unregisterProducer(self):\n \"\"\"\n Call this to unregister producer for standard input.\"\"\"\n if 0 in self.pipes:\n self.pipes[0].unregisterProducer()\n\n def writeSequence(self, seq):\n \"\"\"\n Call this to write to standard input on this process.\n\n NOTE: This will silently lose data if there is no standard input.\n \"\"\"\n if 0 in self.pipes:\n self.pipes[0].writeSequence(seq)\n\n def childDataReceived(self, name, data):\n self.proto.childDataReceived(name, data)\n\n def childConnectionLost(self, childFD, reason):\n # this is called when one of the helpers (ProcessReader or\n # ProcessWriter) notices their pipe has been closed\n os.close(self.pipes[childFD].fileno())\n del self.pipes[childFD]\n try:\n self.proto.childConnectionLost(childFD)\n except BaseException:\n log.err()\n self.maybeCallProcessEnded()\n\n def maybeCallProcessEnded(self):\n # we don't call ProcessProtocol.processEnded until:\n # the child has terminated, AND\n # all writers have indicated an error status, AND\n # all readers have indicated EOF\n # This insures that 
we've gathered all output from the process.\n if self.pipes:\n return\n if not self.lostProcess:\n self.reapProcess()\n return\n _BaseProcess.maybeCallProcessEnded(self)\n\n def getHost(self):\n # ITransport.getHost\n raise NotImplementedError()\n\n def getPeer(self):\n # ITransport.getPeer\n raise NotImplementedError()\n\n\n@implementer(IProcessTransport)\nclass PTYProcess(abstract.FileDescriptor, _BaseProcess):\n \"\"\"\n An operating-system Process that uses PTY support.\n \"\"\"\n\n status = -1\n pid = None\n\n def __init__(\n self,\n reactor,\n executable,\n args,\n environment,\n path,\n proto,\n uid=None,\n gid=None,\n usePTY=None,\n ):\n \"\"\"\n Spawn an operating-system process.\n\n This is where the hard work of disconnecting all currently open\n files / forking / executing the new process happens. (This is\n executed automatically when a Process is instantiated.)\n\n This will also run the subprocess as a given user ID and group ID, if\n specified. (Implementation Note: this doesn't support all the arcane\n nuances of setXXuid on UNIX: it will assume that either your effective\n or real UID is 0.)\n \"\"\"\n if pty is None and not isinstance(usePTY, (tuple, list)):\n # no pty module and we didn't get a pty to use\n raise NotImplementedError(\n \"cannot use PTYProcess on platforms without the pty module.\"\n )\n abstract.FileDescriptor.__init__(self, reactor)\n _BaseProcess.__init__(self, proto)\n\n if isinstance(usePTY, (tuple, list)):\n masterfd, slavefd, _ = usePTY\n else:\n masterfd, slavefd = pty.openpty()\n\n try:\n self._fork(\n path,\n uid,\n gid,\n executable,\n args,\n environment,\n masterfd=masterfd,\n slavefd=slavefd,\n )\n except BaseException:\n if not isinstance(usePTY, (tuple, list)):\n os.close(masterfd)\n os.close(slavefd)\n raise\n\n # we are now in parent process:\n os.close(slavefd)\n fdesc.setNonBlocking(masterfd)\n self.fd = masterfd\n self.startReading()\n self.connected = 1\n self.status = -1\n try:\n self.proto.makeConnection(self)\n except BaseException:\n log.err()\n registerReapProcessHandler(self.pid, self)\n\n def _setupChild(self, masterfd, slavefd):\n \"\"\"\n Set up child process after C{fork()} but before C{exec()}.\n\n This involves:\n\n - closing C{masterfd}, since it is not used in the subprocess\n\n - creating a new session with C{os.setsid}\n\n - changing the controlling terminal of the process (and the new\n session) to point at C{slavefd}\n\n - duplicating C{slavefd} to standard input, output, and error\n\n - closing all other open file descriptors (according to\n L{_listOpenFDs})\n\n - re-setting all signal handlers to C{SIG_DFL}\n\n @param masterfd: The master end of a PTY file descriptors opened with\n C{openpty}.\n @type masterfd: L{int}\n\n @param slavefd: The slave end of a PTY opened with C{openpty}.\n @type slavefd: L{int}\n \"\"\"\n os.close(masterfd)\n os.setsid()\n fcntl.ioctl(slavefd, termios.TIOCSCTTY, \"\")\n\n for fd in range(3):\n if fd != slavefd:\n os.close(fd)\n\n os.dup2(slavefd, 0) # stdin\n os.dup2(slavefd, 1) # stdout\n os.dup2(slavefd, 2) # stderr\n\n for fd in _listOpenFDs():\n if fd > 2:\n try:\n os.close(fd)\n except BaseException:\n pass\n\n self._resetSignalDisposition()\n\n def closeStdin(self):\n # PTYs do not have stdin/stdout/stderr. They only have in and out, just\n # like sockets. 
You cannot close one without closing off the entire PTY\n pass\n\n def closeStdout(self):\n pass\n\n def closeStderr(self):\n pass\n\n def doRead(self):\n \"\"\"\n Called when my standard output stream is ready for reading.\n \"\"\"\n return fdesc.readFromFD(\n self.fd, lambda data: self.proto.childDataReceived(1, data)\n )\n\n def fileno(self):\n \"\"\"\n This returns the file number of standard output on this process.\n \"\"\"\n return self.fd\n\n def maybeCallProcessEnded(self):\n # two things must happen before we call the ProcessProtocol's\n # processEnded method. 1: the child process must die and be reaped\n # (which calls our own processEnded method). 2: the child must close\n # their stdin/stdout/stderr fds, causing the pty to close, causing\n # our connectionLost method to be called. #2 can also be triggered\n # by calling .loseConnection().\n if self.lostProcess == 2:\n _BaseProcess.maybeCallProcessEnded(self)\n\n def connectionLost(self, reason):\n \"\"\"\n I call this to clean up when one or all of my connections has died.\n \"\"\"\n abstract.FileDescriptor.connectionLost(self, reason)\n os.close(self.fd)\n self.lostProcess += 1\n self.maybeCallProcessEnded()\n\n def writeSomeData(self, data):\n \"\"\"\n Write some data to the open process.\n \"\"\"\n return fdesc.writeToFD(self.fd, data)\n\n def closeChildFD(self, descriptor):\n # IProcessTransport\n raise NotImplementedError()\n\n def writeToChild(self, childFD, data):\n # IProcessTransport\n raise NotImplementedError()\n", "path": "src/twisted/internet/process.py" } ]
diff --git a/src/twisted/internet/process.py b/src/twisted/internet/process.py index ef3b88d9f19..ff7684e358b 100644 --- a/src/twisted/internet/process.py +++ b/src/twisted/internet/process.py @@ -879,7 +879,7 @@ def _trySpawnInsteadOfFork( else: fdState.append((eachFD, isCloseOnExec)) if environment is None: - environment = {} + environment = os.environ setSigDef = [ everySignal diff --git a/src/twisted/internet/test/test_process.py b/src/twisted/internet/test/test_process.py index 0b8cdee7500..d1d930cca39 100644 --- a/src/twisted/internet/test/test_process.py +++ b/src/twisted/internet/test/test_process.py @@ -29,6 +29,7 @@ from twisted.python.filepath import FilePath, _asFilesystemBytes from twisted.python.log import err, msg from twisted.python.runtime import platform +from twisted.test.test_process import Accumulator from twisted.trial.unittest import SynchronousTestCase, TestCase # Get the current Python executable as a bytestring. @@ -1001,6 +1002,132 @@ def launchProcessAndWait(reactor): hamcrest.equal_to(["process already removed as desired"]), ) + def checkSpawnProcessEnvironment(self, spawnKwargs, expectedEnv, usePosixSpawnp): + """ + Shared code for testing the environment variables + present in the spawned process. + + The spawned process serializes its environ to stderr or stdout (depending on usePTY) + which is checked against os.environ of the calling process. + """ + p = Accumulator() + d = p.endedDeferred = Deferred() + + reactor = self.buildReactor() + reactor._neverUseSpawn = not usePosixSpawnp + + reactor.callWhenRunning( + reactor.spawnProcess, + p, + pyExe, + [ + pyExe, + b"-c", + networkString( + "import os, sys; " + "env = dict(os.environ); " + # LC_CTYPE is set by python, see https://peps.python.org/pep-0538/ + 'env.pop("LC_CTYPE", None); ' + 'env.pop("__CF_USER_TEXT_ENCODING", None); ' + "sys.stderr.write(str(sorted(env.items())))" + ), + ], + usePTY=self.usePTY, + **spawnKwargs, + ) + + def shutdown(ign): + reactor.stop() + + d.addBoth(shutdown) + + self.runReactor(reactor) + + expectedEnv.pop("LC_CTYPE", None) + expectedEnv.pop("__CF_USER_TEXT_ENCODING", None) + self.assertEqual( + bytes(str(sorted(expectedEnv.items())), "utf-8"), + p.outF.getvalue() if self.usePTY else p.errF.getvalue(), + ) + + def checkSpawnProcessEnvironmentWithPosixSpawnp(self, spawnKwargs, expectedEnv): + return self.checkSpawnProcessEnvironment( + spawnKwargs, expectedEnv, usePosixSpawnp=True + ) + + def checkSpawnProcessEnvironmentWithFork(self, spawnKwargs, expectedEnv): + return self.checkSpawnProcessEnvironment( + spawnKwargs, expectedEnv, usePosixSpawnp=False + ) + + @onlyOnPOSIX + def test_environmentPosixSpawnpEnvNotSet(self): + """ + An empty environment is passed to the spawned process, when the default value of the C{env} + is used. That is, when the C{env} argument is not explicitly set. + + In this case posix_spawnp is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithPosixSpawnp({}, {}) + + @onlyOnPOSIX + def test_environmentForkEnvNotSet(self): + """ + An empty environment is passed to the spawned process, when the default value of the C{env} + is used. That is, when the C{env} argument is not explicitly set. + + In this case fork+execvpe is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithFork({}, {}) + + @onlyOnPOSIX + def test_environmentPosixSpawnpEnvNone(self): + """ + The parent process environment is passed to the spawned process, when C{env} is set to + C{None}. 
+ + In this case posix_spawnp is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithPosixSpawnp( + {"env": None}, os.environ + ) + + @onlyOnPOSIX + def test_environmentForkEnvNone(self): + """ + The parent process environment is passed to the spawned process, when C{env} is set to + C{None}. + + In this case fork+execvpe is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithFork({"env": None}, os.environ) + + @onlyOnPOSIX + def test_environmentPosixSpawnpEnvCustom(self): + """ + The user-specified environment without extra variables from parent process is passed to the + spawned process, when C{env} is set to a dictionary. + + In this case posix_spawnp is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithPosixSpawnp( + {"env": {"MYENV1": "myvalue1"}}, + {"MYENV1": "myvalue1"}, + ) + + @onlyOnPOSIX + def test_environmentForkEnvCustom(self): + """ + The user-specified environment without extra variables from parent process is passed to the + spawned process, when C{env} is set to a dictionary. + + In this case fork+execvpe is used as the backend for spawning processes. + """ + return self.checkSpawnProcessEnvironmentWithFork( + {"env": {"MYENV1": "myvalue1"}}, + {"MYENV1": "myvalue1"}, + ) + globals().update(ProcessTestsBuilder.makeTestCaseClasses()) diff --git a/src/twisted/newsfragments/12068.bugfix b/src/twisted/newsfragments/12068.bugfix new file mode 100644 index 00000000000..584d3ed9443 --- /dev/null +++ b/src/twisted/newsfragments/12068.bugfix @@ -0,0 +1 @@ +twisted.internet.process.Process, used by ``reactor.spawnProcess``, now copies the parent environment when the `env=None` argument is passed on Posix systems and ``os.posix_spawnp`` is used internally.
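The diff above changes the posix_spawnp path so that the default of `env=None` inherits the parent's `os.environ` instead of an empty mapping, and adds tests covering both the posix_spawnp and fork+execvpe back ends. A minimal caller-side sketch of the contract those tests assert is below; the protocol class, the child command, and the five-second shutdown timer are illustrative assumptions, not part of the patch.

```python
# Minimal sketch of the env= contract exercised by the tests above.
import sys

from twisted.internet import protocol, reactor


class CollectOutput(protocol.ProcessProtocol):
    def __init__(self, label):
        self.label = label
        self.data = b""

    def childDataReceived(self, childFD, data):
        self.data += data

    def processEnded(self, reason):
        print(self.label, self.data.decode(errors="replace"))


child = [sys.executable, "-c", "import os; print(sorted(os.environ))"]

# env=None: the child inherits (a copy of) the parent's os.environ.
reactor.spawnProcess(CollectOutput("inherited:"), child[0], child, env=None)

# env={...}: the child sees only what is passed here, plus anything the
# child runtime itself injects (e.g. LC_CTYPE added by Python).
reactor.spawnProcess(CollectOutput("explicit:"), child[0], child,
                     env={"ONLY_VAR": "1"})

reactor.callLater(5, reactor.stop)
reactor.run()
```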
vllm-project__vllm-1666
Batch generation with long prompt generates incorrect number of outputs When a prompt in a batch generation is too long for the model, `llm.generate` returns an unexpected number of outputs: ```python In [11]: prompts = ["This is a short prompt", "This is a very long prompt " * 1000] ...: print(len(prompts)) 2 In [12]: outputs = llm.generate(prompts, sampling_params=sampling_params, use_tqdm=False) WARNING 11-14 04:11:47 scheduler.py:146] Input prompt (6002 tokens) is too long and exceeds limit of 4096 In [13]: print(len(outputs)) 3 ``` It appears the too-long prompt gets doubled up in the output: ```python In [14]: prompts = ["This is a short prompt", "This is a very long prompt " * 1000, "Here's another short ...: prompt"] ...: print(len(prompts)) 3 In [15]: outputs = llm.generate(prompts, sampling_params=sampling_params, use_tqdm=False) WARNING 11-14 04:15:02 scheduler.py:146] Input prompt (6002 tokens) is too long and exceeds limit of 4096 In [16]: outputs[0].prompt[:100] Out[16]: 'This is a short prompt' In [17]: outputs[1].prompt[:100] Out[17]: 'This is a very long prompt This is a very long prompt This is a very long prompt This is a very long' In [18]: outputs[2].prompt[:100] Out[18]: 'This is a very long prompt This is a very long prompt This is a very long prompt This is a very long' In [19]: outputs[3].prompt[:100] Out[19]: "Here's another short prompt" ``` We are using `zip` to recombine the `outputs` with input data after the generation, and this causes big problems since the zip is off-by-one after any prompt was encountered over the size limit. Here's a minimum reproducible script: ```python from vllm import LLM, SamplingParams sampling_params = SamplingParams(temperature=0.01, top_p=0.1, max_tokens=256) llm = LLM(model=f"meta-llama/Llama-2-7b-hf", max_num_batched_tokens=4096, tensor_parallel_size=1) prompts = ["This is a short prompt", "This is a very long prompt " * 1000] print(len(prompts)) outputs = llm.generate(prompts, sampling_params=sampling_params) print(len(outputs)) ``` Environment info: ``` (eb) kwood@kwood-lab:~$ cat /etc/issue Ubuntu 22.04.3 LTS \n \l (eb) kwood@kwood-lab:~$ pip freeze | grep vllm vllm==0.2.1.post1 (eb) kwood@kwood-lab:~$ nvidia-smi Tue Nov 14 04:22:19 2023 +---------------------------------------------------------------------------------------+ | NVIDIA-SMI 535.129.03 Driver Version: 535.129.03 CUDA Version: 12.2 | |-----------------------------------------+----------------------+----------------------+ | GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |=========================================+======================+======================| | 0 NVIDIA GeForce RTX 4090 On | 00000000:2D:00.0 Off | Off | | 0% 40C P8 36W / 450W | 3MiB / 24564MiB | 0% Default | | | | N/A | +-----------------------------------------+----------------------+----------------------+ +---------------------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=======================================================================================| | No running processes found | +---------------------------------------------------------------------------------------+ ```
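Until the engine-level fix (see the diff further down), the mismatch can at least be caught on the caller side by refusing to pair prompts and outputs positionally when the counts differ. A small defensive sketch, reusing `llm`, `prompts`, and `sampling_params` from the reproduction script above and assuming Python 3.10+ for `zip(..., strict=True)`:

```python
# Defensive recombination: does not repair the duplicated entry, but turns
# the silent off-by-one into an immediate, visible error.
outputs = llm.generate(prompts, sampling_params=sampling_params)

if len(outputs) != len(prompts):
    raise RuntimeError(
        f"expected {len(prompts)} outputs, got {len(outputs)}; "
        "an over-length prompt was likely duplicated"
    )

for prompt, output in zip(prompts, outputs, strict=True):
    completion = output.outputs[0].text  # first generated candidate
    print(repr(prompt[:40]), "->", repr(completion[:40]))
```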
[ { "content": "import copy\nimport time\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\nfrom vllm.core.scheduler import Scheduler, SchedulerOutputs\nfrom vllm.engine.arg_utils import EngineArgs\nfrom vllm.engine.ray_utils import RayWorker, initialize_cluster, ray\nfrom vllm.logger import init_logger\nfrom vllm.outputs import RequestOutput\nfrom vllm.sampling_params import SamplingParams\nfrom vllm.sequence import (SamplerOutput, Sequence, SequenceGroup,\n SequenceGroupMetadata, SequenceGroupOutputs,\n SequenceOutputs, SequenceStatus)\nfrom vllm.transformers_utils.tokenizer import (detokenize_incrementally,\n get_tokenizer)\nfrom vllm.utils import Counter\n\nif ray:\n from ray.air.util.torch_dist import init_torch_dist_process_group\n from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\n\nif TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n\nlogger = init_logger(__name__)\n\n_LOGGING_INTERVAL_SEC = 5\n\n\nclass LLMEngine:\n \"\"\"An LLM engine that receives requests and generates texts.\n\n This is the main class for the vLLM engine. It receives requests\n from clients and generates texts from the LLM. It includes a tokenizer, a\n language model (possibly distributed across multiple GPUs), and GPU memory\n space allocated for intermediate states (aka KV cache). This class utilizes\n iteration-level scheduling and efficient memory management to maximize the\n serving throughput.\n\n The `LLM` class wraps this class for offline batched inference and the\n `AsyncLLMEngine` class wraps this class for online serving.\n\n NOTE: The config arguments are derived from the `EngineArgs` class. For the\n comprehensive list of arguments, see `EngineArgs`.\n\n Args:\n model_config: The configuration related to the LLM model.\n cache_config: The configuration related to the KV cache memory\n management.\n parallel_config: The configuration related to distributed execution.\n scheduler_config: The configuration related to the request scheduler.\n distributed_init_method: The initialization method for distributed\n execution. 
See `torch.distributed.init_process_group` for details.\n placement_group: Ray placement group for distributed execution.\n Required for distributed execution.\n log_stats: Whether to log statistics.\n \"\"\"\n\n def __init__(\n self,\n model_config: ModelConfig,\n cache_config: CacheConfig,\n parallel_config: ParallelConfig,\n scheduler_config: SchedulerConfig,\n distributed_init_method: str,\n placement_group: Optional[\"PlacementGroup\"],\n log_stats: bool,\n ) -> None:\n logger.info(\n \"Initializing an LLM engine with config: \"\n f\"model={model_config.model!r}, \"\n f\"tokenizer={model_config.tokenizer!r}, \"\n f\"tokenizer_mode={model_config.tokenizer_mode}, \"\n f\"revision={model_config.revision}, \"\n f\"tokenizer_revision={model_config.tokenizer_revision}, \"\n f\"trust_remote_code={model_config.trust_remote_code}, \"\n f\"dtype={model_config.dtype}, \"\n f\"max_seq_len={model_config.max_model_len}, \"\n f\"download_dir={model_config.download_dir!r}, \"\n f\"load_format={model_config.load_format}, \"\n f\"tensor_parallel_size={parallel_config.tensor_parallel_size}, \"\n f\"quantization={model_config.quantization}, \"\n f\"seed={model_config.seed})\")\n # TODO(woosuk): Print more configs in debug mode.\n\n self.model_config = model_config\n self.cache_config = cache_config\n assert self.cache_config.sliding_window == getattr(\n self.model_config.hf_config, \"sliding_window\", None)\n self.parallel_config = parallel_config\n self.scheduler_config = scheduler_config\n self.log_stats = log_stats\n self._verify_args()\n\n self.tokenizer = get_tokenizer(\n model_config.tokenizer,\n tokenizer_mode=model_config.tokenizer_mode,\n trust_remote_code=model_config.trust_remote_code,\n tokenizer_revision=model_config.tokenizer_revision,\n revision=model_config.revision)\n self.seq_counter = Counter()\n\n # Create the parallel GPU workers.\n if self.parallel_config.worker_use_ray:\n self._init_workers_ray(placement_group)\n else:\n self._init_workers(distributed_init_method)\n\n # Profile the memory usage and initialize the cache.\n self._init_cache()\n\n # Create the scheduler.\n self.scheduler = Scheduler(scheduler_config, cache_config)\n\n # Logging.\n self.last_logging_time = 0.0\n # List of (timestamp, num_tokens)\n self.num_prompt_tokens: List[Tuple[float, int]] = []\n # List of (timestamp, num_tokens)\n self.num_generation_tokens: List[Tuple[float, int]] = []\n\n def _init_workers(self, distributed_init_method: str):\n # Lazy import the Worker to avoid importing torch.cuda/xformers\n # before CUDA_VISIBLE_DEVICES is set in the Worker\n from vllm.worker.worker import Worker # pylint: disable=import-outside-toplevel\n\n assert self.parallel_config.world_size == 1, (\n \"Ray is required if parallel_config.world_size > 1.\")\n\n self.workers: List[Worker] = []\n worker = Worker(\n self.model_config,\n self.parallel_config,\n self.scheduler_config,\n 0,\n distributed_init_method,\n )\n self.workers.append(worker)\n self._run_workers(\n \"init_model\",\n get_all_outputs=True,\n )\n\n def _init_workers_ray(self, placement_group: \"PlacementGroup\",\n **ray_remote_kwargs):\n # Lazy import the Worker to avoid importing torch.cuda/xformers\n # before CUDA_VISIBLE_DEVICES is set in the Worker\n from vllm.worker.worker import Worker # pylint: disable=import-outside-toplevel\n\n self.workers: List[Worker] = []\n for bundle in placement_group.bundle_specs:\n if not bundle.get(\"GPU\", 0):\n continue\n worker = ray.remote(\n num_cpus=0,\n num_gpus=1,\n 
scheduling_strategy=PlacementGroupSchedulingStrategy(\n placement_group=placement_group,\n placement_group_capture_child_tasks=True),\n **ray_remote_kwargs,\n )(RayWorker).remote(self.model_config.trust_remote_code)\n self.workers.append(worker)\n\n # Initialize torch distributed process group for the workers.\n init_torch_dist_process_group(self.workers, backend=\"nccl\")\n model_config = copy.deepcopy(self.model_config)\n parallel_config = copy.deepcopy(self.parallel_config)\n scheduler_config = copy.deepcopy(self.scheduler_config)\n self._run_workers(\"init_worker\",\n get_all_outputs=True,\n worker_init_fn=lambda: Worker(\n model_config,\n parallel_config,\n scheduler_config,\n None,\n None,\n ))\n self._run_workers(\n \"init_model\",\n get_all_outputs=True,\n )\n\n def _verify_args(self) -> None:\n self.model_config.verify_with_parallel_config(self.parallel_config)\n self.cache_config.verify_with_parallel_config(self.parallel_config)\n\n def _init_cache(self) -> None:\n \"\"\"Profiles the memory usage and initializes the KV cache.\"\"\"\n # Get the maximum number of blocks that can be allocated on GPU and CPU.\n num_blocks = self._run_workers(\n \"profile_num_available_blocks\",\n get_all_outputs=True,\n block_size=self.cache_config.block_size,\n gpu_memory_utilization=self.cache_config.gpu_memory_utilization,\n cpu_swap_space=self.cache_config.swap_space_bytes,\n )\n\n # Since we use a shared centralized controller, we take the minimum\n # number of blocks across all workers to make sure all the memory\n # operators can be applied to all workers.\n num_gpu_blocks = min(b[0] for b in num_blocks)\n num_cpu_blocks = min(b[1] for b in num_blocks)\n # FIXME(woosuk): Change to debug log.\n logger.info(f\"# GPU blocks: {num_gpu_blocks}, \"\n f\"# CPU blocks: {num_cpu_blocks}\")\n\n if num_gpu_blocks <= 0:\n raise ValueError(\"No available memory for the cache blocks. \"\n \"Try increasing `gpu_memory_utilization` when \"\n \"initializing the engine.\")\n\n self.cache_config.num_gpu_blocks = num_gpu_blocks\n self.cache_config.num_cpu_blocks = num_cpu_blocks\n\n # Initialize the cache.\n self._run_workers(\"init_cache_engine\", cache_config=self.cache_config)\n\n @classmethod\n def from_engine_args(cls, engine_args: EngineArgs) -> \"LLMEngine\":\n \"\"\"Creates an LLM engine from the engine arguments.\"\"\"\n # Create the engine configs.\n engine_configs = engine_args.create_engine_configs()\n parallel_config = engine_configs[2]\n # Initialize the cluster.\n distributed_init_method, placement_group = initialize_cluster(\n parallel_config)\n # Create the LLM engine.\n engine = cls(*engine_configs,\n distributed_init_method,\n placement_group,\n log_stats=not engine_args.disable_log_stats)\n return engine\n\n def add_request(\n self,\n request_id: str,\n prompt: Optional[str],\n sampling_params: SamplingParams,\n prompt_token_ids: Optional[List[int]] = None,\n arrival_time: Optional[float] = None,\n ) -> None:\n \"\"\"Add a request to the engine's request pool.\n\n The request is added to the request pool and will be processed by the\n scheduler as `engine.step()` is called. The exact scheduling policy is\n determined by the scheduler.\n\n Args:\n request_id: The unique ID of the request.\n prompt: The prompt string. Can be None if prompt_token_ids is\n provided.\n sampling_params: The sampling parameters for text generation.\n prompt_token_ids: The token IDs of the prompt. If None, we\n use the tokenizer to convert the prompts to token IDs.\n arrival_time: The arrival time of the request. 
If None, we use\n the current monotonic time.\n \"\"\"\n if arrival_time is None:\n arrival_time = time.monotonic()\n if prompt_token_ids is None:\n assert prompt is not None\n prompt_token_ids = self.tokenizer.encode(prompt)\n\n # Create the sequences.\n block_size = self.cache_config.block_size\n seq_id = next(self.seq_counter)\n seq = Sequence(seq_id, prompt, prompt_token_ids, block_size)\n\n # Create the sequence group.\n seq_group = SequenceGroup(request_id, [seq], sampling_params,\n arrival_time)\n\n # Add the sequence group to the scheduler.\n self.scheduler.add_seq_group(seq_group)\n\n def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:\n \"\"\"Aborts a request(s) with the given ID.\n\n Args:\n request_id: The ID(s) of the request to abort.\n \"\"\"\n self.scheduler.abort_seq_group(request_id)\n\n def get_model_config(self) -> ModelConfig:\n \"\"\"Gets the model configuration.\"\"\"\n return self.model_config\n\n def get_num_unfinished_requests(self) -> int:\n \"\"\"Gets the number of unfinished requests.\"\"\"\n return self.scheduler.get_num_unfinished_seq_groups()\n\n def has_unfinished_requests(self) -> bool:\n \"\"\"Returns True if there are unfinished requests.\"\"\"\n return self.scheduler.has_unfinished_seqs()\n\n def _schedule(\n self\n ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs,\n List[RequestOutput]]:\n seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()\n return seq_group_metadata_list, scheduler_outputs, [\n RequestOutput.from_seq_group(seq_group)\n for seq_group in scheduler_outputs.ignored_seq_groups\n ]\n\n def _check_beam_search_early_stopping(\n self,\n early_stopping: Union[bool, str],\n sampling_params: SamplingParams,\n best_running_seq: Sequence,\n current_worst_seq: Sequence,\n ) -> bool:\n assert sampling_params.use_beam_search\n length_penalty = sampling_params.length_penalty\n if early_stopping is True:\n return True\n\n current_worst_score = (current_worst_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n if early_stopping is False:\n highest_attainable_score = (best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n else:\n assert early_stopping == \"never\"\n if length_penalty > 0.0:\n # If length_penalty > 0.0, beam search will prefer longer\n # sequences. The highest attainable score calculation is\n # based on the longest possible sequence length in this case.\n max_possible_length = max(\n best_running_seq.get_prompt_len() +\n sampling_params.max_tokens,\n self.scheduler_config.max_model_len)\n highest_attainable_score = (\n best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id,\n seq_len=max_possible_length))\n else:\n # Otherwise, beam search will prefer shorter sequences. 
The\n # highest attainable score calculation is based on the current\n # sequence length.\n highest_attainable_score = (\n best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n return current_worst_score >= highest_attainable_score\n\n def _process_sequence_group_outputs(self, seq_group: SequenceGroup,\n outputs: SequenceGroupOutputs) -> None:\n # Process prompt logprobs\n prompt_logprobs = outputs.prompt_logprobs\n if prompt_logprobs is not None:\n seq_group.prompt_logprobs = prompt_logprobs\n\n # Process samples\n samples = outputs.samples\n parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)\n existing_finished_seqs = seq_group.get_finished_seqs()\n parent_child_dict = {\n parent_seq.seq_id: []\n for parent_seq in parent_seqs\n }\n for sample in samples:\n parent_child_dict[sample.parent_seq_id].append(sample)\n # List of (child, parent)\n child_seqs: List[Tuple[Sequence, Sequence]] = []\n\n # Process the child samples for each parent sequence\n for parent in parent_seqs:\n child_samples: List[SequenceOutputs] = parent_child_dict[\n parent.seq_id]\n if len(child_samples) == 0:\n # This parent sequence has no children samples. Remove\n # the parent sequence from the sequence group since it will\n # not be used in the future iterations.\n parent.status = SequenceStatus.FINISHED_ABORTED\n seq_group.remove(parent.seq_id)\n self.scheduler.free_seq(parent)\n continue\n # Fork the parent sequence if there are multiple child samples.\n for child_sample in child_samples[:-1]:\n new_child_seq_id = next(self.seq_counter)\n child = parent.fork(new_child_seq_id)\n child.append_token_id(child_sample.output_token,\n child_sample.logprobs)\n child_seqs.append((child, parent))\n # Continue the parent sequence for the last child sample.\n # We reuse the parent sequence here to reduce redundant memory\n # copies, especially when using non-beam search sampling methods.\n last_child_sample = child_samples[-1]\n parent.append_token_id(last_child_sample.output_token,\n last_child_sample.logprobs)\n child_seqs.append((parent, parent))\n\n for seq, _ in child_seqs:\n self._decode_sequence(seq, seq_group.sampling_params)\n self._check_stop(seq, seq_group.sampling_params)\n\n # Non-beam search case\n if not seq_group.sampling_params.use_beam_search:\n # For newly created child sequences, add them to the sequence group\n # and fork them in block manager if they are not finished.\n for seq, parent in child_seqs:\n if seq is not parent:\n seq_group.add(seq)\n if not seq.is_finished():\n self.scheduler.fork_seq(parent, seq)\n\n # Free the finished and selected parent sequences' memory in block\n # manager. 
Keep them in the sequence group as candidate output.\n # NOTE: we need to fork the new sequences before freeing the\n # old sequences.\n for seq, parent in child_seqs:\n if seq is parent and seq.is_finished():\n self.scheduler.free_seq(seq)\n return\n\n # Beam search case\n # Select the child sequences to keep in the sequence group.\n selected_child_seqs = []\n unselected_child_seqs = []\n beam_width = seq_group.sampling_params.best_of\n length_penalty = seq_group.sampling_params.length_penalty\n\n # Select the newly finished sequences with the highest scores\n # to replace existing finished sequences.\n # Tuple of (seq, parent, is_new)\n existing_finished_seqs = [(seq, None, False)\n for seq in existing_finished_seqs]\n new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs\n if seq.is_finished()]\n all_finished_seqs = existing_finished_seqs + new_finished_seqs\n # Sort the finished sequences by their scores.\n all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id),\n reverse=True)\n for seq, parent, is_new in all_finished_seqs[:beam_width]:\n if is_new:\n # A newly generated child sequence finishes and has a high\n # score, so we will add it into the sequence group.\n selected_child_seqs.append((seq, parent))\n for seq, parent, is_new in all_finished_seqs[beam_width:]:\n if is_new:\n # A newly generated child sequence finishes but has a low\n # score, so we will not add it into the sequence group.\n # Additionally, if this sequence is a continuation of a\n # parent sequence, we will need remove the parent sequence\n # from the sequence group.\n unselected_child_seqs.append((seq, parent))\n else:\n # An existing finished sequence has a low score, so we will\n # remove it from the sequence group.\n seq_group.remove(seq.seq_id)\n\n # select the top beam_width sequences from the running\n # sequences for the next iteration to continue the beam\n # search.\n running_child_seqs = [(seq, parent) for seq, parent in child_seqs\n if not seq.is_finished()]\n # Sort the running sequences by their scores.\n running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id),\n reverse=True)\n\n # Check if we can stop the beam search.\n if len(running_child_seqs) == 0:\n # No running sequences, stop the beam search.\n stop_beam_search = True\n elif len(all_finished_seqs) < beam_width:\n # Not enough finished sequences, continue the beam search.\n stop_beam_search = False\n else:\n # Check the early stopping criteria\n best_running_seq = running_child_seqs[0][0]\n current_worst_seq = all_finished_seqs[beam_width - 1][0]\n stop_beam_search = self._check_beam_search_early_stopping(\n seq_group.sampling_params.early_stopping,\n seq_group.sampling_params, best_running_seq, current_worst_seq)\n\n if stop_beam_search:\n # Stop the beam search and remove all the running sequences from\n # the sequence group.\n unselected_child_seqs.extend(running_child_seqs)\n else:\n # Continue the beam search and select the top beam_width sequences\n # to continue the beam search.\n selected_child_seqs.extend(running_child_seqs[:beam_width])\n # The remaining running sequences will not be used in the next\n # iteration. 
Again, if these sequences are continuations of\n # parent sequences, we will need to remove the parent sequences\n # from the sequence group.\n unselected_child_seqs.extend(running_child_seqs[beam_width:])\n\n # For newly created child sequences, add them to the sequence group\n # and fork them in block manager if they are not finished.\n for seq, parent in selected_child_seqs:\n if seq is not parent:\n seq_group.add(seq)\n if not seq.is_finished():\n self.scheduler.fork_seq(parent, seq)\n\n # Free the finished and selected parent sequences' memory in block\n # manager. Keep them in the sequence group as candidate output.\n for seq, parent in selected_child_seqs:\n if seq is parent and seq.is_finished():\n self.scheduler.free_seq(seq)\n\n # Remove the unselected parent sequences from the sequence group and\n # free their memory in block manager.\n for seq, parent in unselected_child_seqs:\n if seq is parent:\n # Remove the parent sequence if it is not selected for next\n # iteration\n seq_group.remove(seq.seq_id)\n self.scheduler.free_seq(seq)\n\n def _process_model_outputs(\n self, output: SamplerOutput,\n scheduler_outputs: SchedulerOutputs) -> List[RequestOutput]:\n # Update the scheduled sequence groups with the model outputs.\n scheduled_seq_groups = scheduler_outputs.scheduled_seq_groups\n for seq_group, outputs in zip(scheduled_seq_groups, output):\n self._process_sequence_group_outputs(seq_group, outputs)\n\n # Free the finished sequence groups.\n self.scheduler.free_finished_seq_groups()\n\n # Create the outputs.\n request_outputs: List[RequestOutput] = []\n for seq_group in (scheduled_seq_groups +\n scheduler_outputs.ignored_seq_groups):\n request_output = RequestOutput.from_seq_group(seq_group)\n request_outputs.append(request_output)\n\n if self.log_stats:\n # Log the system stats.\n self._log_system_stats(scheduler_outputs.prompt_run,\n scheduler_outputs.num_batched_tokens)\n return request_outputs\n\n def step(self) -> List[RequestOutput]:\n \"\"\"Performs one decoding iteration and returns newly generated results.\n\n This function performs one decoding iteration of the engine. It first\n schedules the sequences to be executed in the next iteration and the\n token blocks to be swapped in/out/copy. Then, it executes the model\n and updates the scheduler with the model outputs. 
Finally, it decodes\n the sequences and returns the newly generated results.\n \"\"\"\n seq_group_metadata_list, scheduler_outputs, ignored = self._schedule()\n if scheduler_outputs.is_empty():\n return ignored\n\n # Execute the model.\n output = self._run_workers(\n \"execute_model\",\n seq_group_metadata_list=seq_group_metadata_list,\n blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,\n blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,\n blocks_to_copy=scheduler_outputs.blocks_to_copy,\n )\n\n return self._process_model_outputs(output, scheduler_outputs) + ignored\n\n def _log_system_stats(\n self,\n prompt_run: bool,\n num_batched_tokens: int,\n ) -> None:\n now = time.monotonic()\n # Log the number of batched input tokens.\n if prompt_run:\n self.num_prompt_tokens.append((now, num_batched_tokens))\n else:\n self.num_generation_tokens.append((now, num_batched_tokens))\n\n elapsed_time = now - self.last_logging_time\n if elapsed_time < _LOGGING_INTERVAL_SEC:\n return\n\n # Discard the old stats.\n self.num_prompt_tokens = [(t, n) for t, n in self.num_prompt_tokens\n if now - t < _LOGGING_INTERVAL_SEC]\n self.num_generation_tokens = [(t, n)\n for t, n in self.num_generation_tokens\n if now - t < _LOGGING_INTERVAL_SEC]\n\n if len(self.num_prompt_tokens) > 1:\n total_num_tokens = sum(n for _, n in self.num_prompt_tokens[:-1])\n window = now - self.num_prompt_tokens[0][0]\n avg_prompt_throughput = total_num_tokens / window\n else:\n avg_prompt_throughput = 0.0\n if len(self.num_generation_tokens) > 1:\n total_num_tokens = sum(n\n for _, n in self.num_generation_tokens[:-1])\n window = now - self.num_generation_tokens[0][0]\n avg_generation_throughput = total_num_tokens / window\n else:\n avg_generation_throughput = 0.0\n\n total_num_gpu_blocks = self.cache_config.num_gpu_blocks\n num_free_gpu_blocks = (\n self.scheduler.block_manager.get_num_free_gpu_blocks())\n num_used_gpu_blocks = total_num_gpu_blocks - num_free_gpu_blocks\n gpu_cache_usage = num_used_gpu_blocks / total_num_gpu_blocks\n\n total_num_cpu_blocks = self.cache_config.num_cpu_blocks\n if total_num_cpu_blocks > 0:\n num_free_cpu_blocks = (\n self.scheduler.block_manager.get_num_free_cpu_blocks())\n num_used_cpu_blocks = total_num_cpu_blocks - num_free_cpu_blocks\n cpu_cache_usage = num_used_cpu_blocks / total_num_cpu_blocks\n else:\n cpu_cache_usage = 0.0\n\n logger.info(\"Avg prompt throughput: \"\n f\"{avg_prompt_throughput:.1f} tokens/s, \"\n \"Avg generation throughput: \"\n f\"{avg_generation_throughput:.1f} tokens/s, \"\n f\"Running: {len(self.scheduler.running)} reqs, \"\n f\"Swapped: {len(self.scheduler.swapped)} reqs, \"\n f\"Pending: {len(self.scheduler.waiting)} reqs, \"\n f\"GPU KV cache usage: {gpu_cache_usage * 100:.1f}%, \"\n f\"CPU KV cache usage: {cpu_cache_usage * 100:.1f}%\")\n self.last_logging_time = now\n\n def _decode_sequence(self, seq: Sequence, prms: SamplingParams) -> None:\n \"\"\"Decodes the new token for a sequence.\"\"\"\n (new_tokens, new_output_text, prefix_offset,\n read_offset) = detokenize_incrementally(\n self.tokenizer,\n all_input_ids=seq.get_token_ids(),\n prev_tokens=seq.tokens,\n prefix_offset=seq.prefix_offset,\n read_offset=seq.read_offset,\n skip_special_tokens=prms.skip_special_tokens,\n spaces_between_special_tokens=prms.spaces_between_special_tokens,\n )\n if seq.tokens is None:\n seq.tokens = new_tokens\n else:\n seq.tokens.extend(new_tokens)\n seq.prefix_offset = prefix_offset\n seq.read_offset = read_offset\n seq.output_text += new_output_text\n\n def 
_check_stop(self, seq: Sequence,\n sampling_params: SamplingParams) -> None:\n \"\"\"Stop the finished sequences.\"\"\"\n for stop_str in sampling_params.stop:\n if seq.output_text.endswith(stop_str):\n # Truncate the output text so that the stop string is\n # not included in the output.\n seq.output_text = seq.output_text[:-len(stop_str)]\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n if seq.get_last_token_id() in sampling_params.stop_token_ids:\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n\n # Check if the sequence has reached max_model_len.\n if seq.get_len() > self.scheduler_config.max_model_len:\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n # Check if the sequence has reached max_tokens.\n if seq.get_output_len() == sampling_params.max_tokens:\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n # Check if the sequence has generated the EOS token.\n if ((not sampling_params.ignore_eos)\n and seq.get_last_token_id() == self.tokenizer.eos_token_id):\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n\n def _run_workers(\n self,\n method: str,\n *args,\n get_all_outputs: bool = False,\n **kwargs,\n ) -> Any:\n \"\"\"Runs the given method on all workers.\"\"\"\n all_outputs = []\n for worker in self.workers:\n if self.parallel_config.worker_use_ray:\n executor = partial(worker.execute_method.remote, method)\n else:\n executor = getattr(worker, method)\n\n output = executor(*args, **kwargs)\n all_outputs.append(output)\n\n if self.parallel_config.worker_use_ray:\n all_outputs = ray.get(all_outputs)\n\n if get_all_outputs:\n return all_outputs\n\n # Make sure all workers have the same results.\n output = all_outputs[0]\n for other_output in all_outputs[1:]:\n assert output == other_output\n return output\n", "path": "vllm/engine/llm_engine.py" } ]
[ { "content": "import copy\nimport time\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\nfrom vllm.core.scheduler import Scheduler, SchedulerOutputs\nfrom vllm.engine.arg_utils import EngineArgs\nfrom vllm.engine.ray_utils import RayWorker, initialize_cluster, ray\nfrom vllm.logger import init_logger\nfrom vllm.outputs import RequestOutput\nfrom vllm.sampling_params import SamplingParams\nfrom vllm.sequence import (SamplerOutput, Sequence, SequenceGroup,\n SequenceGroupMetadata, SequenceGroupOutputs,\n SequenceOutputs, SequenceStatus)\nfrom vllm.transformers_utils.tokenizer import (detokenize_incrementally,\n get_tokenizer)\nfrom vllm.utils import Counter\n\nif ray:\n from ray.air.util.torch_dist import init_torch_dist_process_group\n from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\n\nif TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n\nlogger = init_logger(__name__)\n\n_LOGGING_INTERVAL_SEC = 5\n\n\nclass LLMEngine:\n \"\"\"An LLM engine that receives requests and generates texts.\n\n This is the main class for the vLLM engine. It receives requests\n from clients and generates texts from the LLM. It includes a tokenizer, a\n language model (possibly distributed across multiple GPUs), and GPU memory\n space allocated for intermediate states (aka KV cache). This class utilizes\n iteration-level scheduling and efficient memory management to maximize the\n serving throughput.\n\n The `LLM` class wraps this class for offline batched inference and the\n `AsyncLLMEngine` class wraps this class for online serving.\n\n NOTE: The config arguments are derived from the `EngineArgs` class. For the\n comprehensive list of arguments, see `EngineArgs`.\n\n Args:\n model_config: The configuration related to the LLM model.\n cache_config: The configuration related to the KV cache memory\n management.\n parallel_config: The configuration related to distributed execution.\n scheduler_config: The configuration related to the request scheduler.\n distributed_init_method: The initialization method for distributed\n execution. 
See `torch.distributed.init_process_group` for details.\n placement_group: Ray placement group for distributed execution.\n Required for distributed execution.\n log_stats: Whether to log statistics.\n \"\"\"\n\n def __init__(\n self,\n model_config: ModelConfig,\n cache_config: CacheConfig,\n parallel_config: ParallelConfig,\n scheduler_config: SchedulerConfig,\n distributed_init_method: str,\n placement_group: Optional[\"PlacementGroup\"],\n log_stats: bool,\n ) -> None:\n logger.info(\n \"Initializing an LLM engine with config: \"\n f\"model={model_config.model!r}, \"\n f\"tokenizer={model_config.tokenizer!r}, \"\n f\"tokenizer_mode={model_config.tokenizer_mode}, \"\n f\"revision={model_config.revision}, \"\n f\"tokenizer_revision={model_config.tokenizer_revision}, \"\n f\"trust_remote_code={model_config.trust_remote_code}, \"\n f\"dtype={model_config.dtype}, \"\n f\"max_seq_len={model_config.max_model_len}, \"\n f\"download_dir={model_config.download_dir!r}, \"\n f\"load_format={model_config.load_format}, \"\n f\"tensor_parallel_size={parallel_config.tensor_parallel_size}, \"\n f\"quantization={model_config.quantization}, \"\n f\"seed={model_config.seed})\")\n # TODO(woosuk): Print more configs in debug mode.\n\n self.model_config = model_config\n self.cache_config = cache_config\n assert self.cache_config.sliding_window == getattr(\n self.model_config.hf_config, \"sliding_window\", None)\n self.parallel_config = parallel_config\n self.scheduler_config = scheduler_config\n self.log_stats = log_stats\n self._verify_args()\n\n self.tokenizer = get_tokenizer(\n model_config.tokenizer,\n tokenizer_mode=model_config.tokenizer_mode,\n trust_remote_code=model_config.trust_remote_code,\n tokenizer_revision=model_config.tokenizer_revision,\n revision=model_config.revision)\n self.seq_counter = Counter()\n\n # Create the parallel GPU workers.\n if self.parallel_config.worker_use_ray:\n self._init_workers_ray(placement_group)\n else:\n self._init_workers(distributed_init_method)\n\n # Profile the memory usage and initialize the cache.\n self._init_cache()\n\n # Create the scheduler.\n self.scheduler = Scheduler(scheduler_config, cache_config)\n\n # Logging.\n self.last_logging_time = 0.0\n # List of (timestamp, num_tokens)\n self.num_prompt_tokens: List[Tuple[float, int]] = []\n # List of (timestamp, num_tokens)\n self.num_generation_tokens: List[Tuple[float, int]] = []\n\n def _init_workers(self, distributed_init_method: str):\n # Lazy import the Worker to avoid importing torch.cuda/xformers\n # before CUDA_VISIBLE_DEVICES is set in the Worker\n from vllm.worker.worker import Worker # pylint: disable=import-outside-toplevel\n\n assert self.parallel_config.world_size == 1, (\n \"Ray is required if parallel_config.world_size > 1.\")\n\n self.workers: List[Worker] = []\n worker = Worker(\n self.model_config,\n self.parallel_config,\n self.scheduler_config,\n 0,\n distributed_init_method,\n )\n self.workers.append(worker)\n self._run_workers(\n \"init_model\",\n get_all_outputs=True,\n )\n\n def _init_workers_ray(self, placement_group: \"PlacementGroup\",\n **ray_remote_kwargs):\n # Lazy import the Worker to avoid importing torch.cuda/xformers\n # before CUDA_VISIBLE_DEVICES is set in the Worker\n from vllm.worker.worker import Worker # pylint: disable=import-outside-toplevel\n\n self.workers: List[Worker] = []\n for bundle in placement_group.bundle_specs:\n if not bundle.get(\"GPU\", 0):\n continue\n worker = ray.remote(\n num_cpus=0,\n num_gpus=1,\n 
scheduling_strategy=PlacementGroupSchedulingStrategy(\n placement_group=placement_group,\n placement_group_capture_child_tasks=True),\n **ray_remote_kwargs,\n )(RayWorker).remote(self.model_config.trust_remote_code)\n self.workers.append(worker)\n\n # Initialize torch distributed process group for the workers.\n init_torch_dist_process_group(self.workers, backend=\"nccl\")\n model_config = copy.deepcopy(self.model_config)\n parallel_config = copy.deepcopy(self.parallel_config)\n scheduler_config = copy.deepcopy(self.scheduler_config)\n self._run_workers(\"init_worker\",\n get_all_outputs=True,\n worker_init_fn=lambda: Worker(\n model_config,\n parallel_config,\n scheduler_config,\n None,\n None,\n ))\n self._run_workers(\n \"init_model\",\n get_all_outputs=True,\n )\n\n def _verify_args(self) -> None:\n self.model_config.verify_with_parallel_config(self.parallel_config)\n self.cache_config.verify_with_parallel_config(self.parallel_config)\n\n def _init_cache(self) -> None:\n \"\"\"Profiles the memory usage and initializes the KV cache.\"\"\"\n # Get the maximum number of blocks that can be allocated on GPU and CPU.\n num_blocks = self._run_workers(\n \"profile_num_available_blocks\",\n get_all_outputs=True,\n block_size=self.cache_config.block_size,\n gpu_memory_utilization=self.cache_config.gpu_memory_utilization,\n cpu_swap_space=self.cache_config.swap_space_bytes,\n )\n\n # Since we use a shared centralized controller, we take the minimum\n # number of blocks across all workers to make sure all the memory\n # operators can be applied to all workers.\n num_gpu_blocks = min(b[0] for b in num_blocks)\n num_cpu_blocks = min(b[1] for b in num_blocks)\n # FIXME(woosuk): Change to debug log.\n logger.info(f\"# GPU blocks: {num_gpu_blocks}, \"\n f\"# CPU blocks: {num_cpu_blocks}\")\n\n if num_gpu_blocks <= 0:\n raise ValueError(\"No available memory for the cache blocks. \"\n \"Try increasing `gpu_memory_utilization` when \"\n \"initializing the engine.\")\n\n self.cache_config.num_gpu_blocks = num_gpu_blocks\n self.cache_config.num_cpu_blocks = num_cpu_blocks\n\n # Initialize the cache.\n self._run_workers(\"init_cache_engine\", cache_config=self.cache_config)\n\n @classmethod\n def from_engine_args(cls, engine_args: EngineArgs) -> \"LLMEngine\":\n \"\"\"Creates an LLM engine from the engine arguments.\"\"\"\n # Create the engine configs.\n engine_configs = engine_args.create_engine_configs()\n parallel_config = engine_configs[2]\n # Initialize the cluster.\n distributed_init_method, placement_group = initialize_cluster(\n parallel_config)\n # Create the LLM engine.\n engine = cls(*engine_configs,\n distributed_init_method,\n placement_group,\n log_stats=not engine_args.disable_log_stats)\n return engine\n\n def add_request(\n self,\n request_id: str,\n prompt: Optional[str],\n sampling_params: SamplingParams,\n prompt_token_ids: Optional[List[int]] = None,\n arrival_time: Optional[float] = None,\n ) -> None:\n \"\"\"Add a request to the engine's request pool.\n\n The request is added to the request pool and will be processed by the\n scheduler as `engine.step()` is called. The exact scheduling policy is\n determined by the scheduler.\n\n Args:\n request_id: The unique ID of the request.\n prompt: The prompt string. Can be None if prompt_token_ids is\n provided.\n sampling_params: The sampling parameters for text generation.\n prompt_token_ids: The token IDs of the prompt. If None, we\n use the tokenizer to convert the prompts to token IDs.\n arrival_time: The arrival time of the request. 
If None, we use\n the current monotonic time.\n \"\"\"\n if arrival_time is None:\n arrival_time = time.monotonic()\n if prompt_token_ids is None:\n assert prompt is not None\n prompt_token_ids = self.tokenizer.encode(prompt)\n\n # Create the sequences.\n block_size = self.cache_config.block_size\n seq_id = next(self.seq_counter)\n seq = Sequence(seq_id, prompt, prompt_token_ids, block_size)\n\n # Create the sequence group.\n seq_group = SequenceGroup(request_id, [seq], sampling_params,\n arrival_time)\n\n # Add the sequence group to the scheduler.\n self.scheduler.add_seq_group(seq_group)\n\n def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:\n \"\"\"Aborts a request(s) with the given ID.\n\n Args:\n request_id: The ID(s) of the request to abort.\n \"\"\"\n self.scheduler.abort_seq_group(request_id)\n\n def get_model_config(self) -> ModelConfig:\n \"\"\"Gets the model configuration.\"\"\"\n return self.model_config\n\n def get_num_unfinished_requests(self) -> int:\n \"\"\"Gets the number of unfinished requests.\"\"\"\n return self.scheduler.get_num_unfinished_seq_groups()\n\n def has_unfinished_requests(self) -> bool:\n \"\"\"Returns True if there are unfinished requests.\"\"\"\n return self.scheduler.has_unfinished_seqs()\n\n def _schedule(\n self\n ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs,\n List[RequestOutput]]:\n seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()\n return seq_group_metadata_list, scheduler_outputs, [\n RequestOutput.from_seq_group(seq_group)\n for seq_group in scheduler_outputs.ignored_seq_groups\n ]\n\n def _check_beam_search_early_stopping(\n self,\n early_stopping: Union[bool, str],\n sampling_params: SamplingParams,\n best_running_seq: Sequence,\n current_worst_seq: Sequence,\n ) -> bool:\n assert sampling_params.use_beam_search\n length_penalty = sampling_params.length_penalty\n if early_stopping is True:\n return True\n\n current_worst_score = (current_worst_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n if early_stopping is False:\n highest_attainable_score = (best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n else:\n assert early_stopping == \"never\"\n if length_penalty > 0.0:\n # If length_penalty > 0.0, beam search will prefer longer\n # sequences. The highest attainable score calculation is\n # based on the longest possible sequence length in this case.\n max_possible_length = max(\n best_running_seq.get_prompt_len() +\n sampling_params.max_tokens,\n self.scheduler_config.max_model_len)\n highest_attainable_score = (\n best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id,\n seq_len=max_possible_length))\n else:\n # Otherwise, beam search will prefer shorter sequences. 
The\n # highest attainable score calculation is based on the current\n # sequence length.\n highest_attainable_score = (\n best_running_seq.get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id))\n return current_worst_score >= highest_attainable_score\n\n def _process_sequence_group_outputs(self, seq_group: SequenceGroup,\n outputs: SequenceGroupOutputs) -> None:\n # Process prompt logprobs\n prompt_logprobs = outputs.prompt_logprobs\n if prompt_logprobs is not None:\n seq_group.prompt_logprobs = prompt_logprobs\n\n # Process samples\n samples = outputs.samples\n parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)\n existing_finished_seqs = seq_group.get_finished_seqs()\n parent_child_dict = {\n parent_seq.seq_id: []\n for parent_seq in parent_seqs\n }\n for sample in samples:\n parent_child_dict[sample.parent_seq_id].append(sample)\n # List of (child, parent)\n child_seqs: List[Tuple[Sequence, Sequence]] = []\n\n # Process the child samples for each parent sequence\n for parent in parent_seqs:\n child_samples: List[SequenceOutputs] = parent_child_dict[\n parent.seq_id]\n if len(child_samples) == 0:\n # This parent sequence has no children samples. Remove\n # the parent sequence from the sequence group since it will\n # not be used in the future iterations.\n parent.status = SequenceStatus.FINISHED_ABORTED\n seq_group.remove(parent.seq_id)\n self.scheduler.free_seq(parent)\n continue\n # Fork the parent sequence if there are multiple child samples.\n for child_sample in child_samples[:-1]:\n new_child_seq_id = next(self.seq_counter)\n child = parent.fork(new_child_seq_id)\n child.append_token_id(child_sample.output_token,\n child_sample.logprobs)\n child_seqs.append((child, parent))\n # Continue the parent sequence for the last child sample.\n # We reuse the parent sequence here to reduce redundant memory\n # copies, especially when using non-beam search sampling methods.\n last_child_sample = child_samples[-1]\n parent.append_token_id(last_child_sample.output_token,\n last_child_sample.logprobs)\n child_seqs.append((parent, parent))\n\n for seq, _ in child_seqs:\n self._decode_sequence(seq, seq_group.sampling_params)\n self._check_stop(seq, seq_group.sampling_params)\n\n # Non-beam search case\n if not seq_group.sampling_params.use_beam_search:\n # For newly created child sequences, add them to the sequence group\n # and fork them in block manager if they are not finished.\n for seq, parent in child_seqs:\n if seq is not parent:\n seq_group.add(seq)\n if not seq.is_finished():\n self.scheduler.fork_seq(parent, seq)\n\n # Free the finished and selected parent sequences' memory in block\n # manager. 
Keep them in the sequence group as candidate output.\n # NOTE: we need to fork the new sequences before freeing the\n # old sequences.\n for seq, parent in child_seqs:\n if seq is parent and seq.is_finished():\n self.scheduler.free_seq(seq)\n return\n\n # Beam search case\n # Select the child sequences to keep in the sequence group.\n selected_child_seqs = []\n unselected_child_seqs = []\n beam_width = seq_group.sampling_params.best_of\n length_penalty = seq_group.sampling_params.length_penalty\n\n # Select the newly finished sequences with the highest scores\n # to replace existing finished sequences.\n # Tuple of (seq, parent, is_new)\n existing_finished_seqs = [(seq, None, False)\n for seq in existing_finished_seqs]\n new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs\n if seq.is_finished()]\n all_finished_seqs = existing_finished_seqs + new_finished_seqs\n # Sort the finished sequences by their scores.\n all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id),\n reverse=True)\n for seq, parent, is_new in all_finished_seqs[:beam_width]:\n if is_new:\n # A newly generated child sequence finishes and has a high\n # score, so we will add it into the sequence group.\n selected_child_seqs.append((seq, parent))\n for seq, parent, is_new in all_finished_seqs[beam_width:]:\n if is_new:\n # A newly generated child sequence finishes but has a low\n # score, so we will not add it into the sequence group.\n # Additionally, if this sequence is a continuation of a\n # parent sequence, we will need remove the parent sequence\n # from the sequence group.\n unselected_child_seqs.append((seq, parent))\n else:\n # An existing finished sequence has a low score, so we will\n # remove it from the sequence group.\n seq_group.remove(seq.seq_id)\n\n # select the top beam_width sequences from the running\n # sequences for the next iteration to continue the beam\n # search.\n running_child_seqs = [(seq, parent) for seq, parent in child_seqs\n if not seq.is_finished()]\n # Sort the running sequences by their scores.\n running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score(\n length_penalty=length_penalty,\n eos_token_id=self.tokenizer.eos_token_id),\n reverse=True)\n\n # Check if we can stop the beam search.\n if len(running_child_seqs) == 0:\n # No running sequences, stop the beam search.\n stop_beam_search = True\n elif len(all_finished_seqs) < beam_width:\n # Not enough finished sequences, continue the beam search.\n stop_beam_search = False\n else:\n # Check the early stopping criteria\n best_running_seq = running_child_seqs[0][0]\n current_worst_seq = all_finished_seqs[beam_width - 1][0]\n stop_beam_search = self._check_beam_search_early_stopping(\n seq_group.sampling_params.early_stopping,\n seq_group.sampling_params, best_running_seq, current_worst_seq)\n\n if stop_beam_search:\n # Stop the beam search and remove all the running sequences from\n # the sequence group.\n unselected_child_seqs.extend(running_child_seqs)\n else:\n # Continue the beam search and select the top beam_width sequences\n # to continue the beam search.\n selected_child_seqs.extend(running_child_seqs[:beam_width])\n # The remaining running sequences will not be used in the next\n # iteration. 
Again, if these sequences are continuations of\n # parent sequences, we will need to remove the parent sequences\n # from the sequence group.\n unselected_child_seqs.extend(running_child_seqs[beam_width:])\n\n # For newly created child sequences, add them to the sequence group\n # and fork them in block manager if they are not finished.\n for seq, parent in selected_child_seqs:\n if seq is not parent:\n seq_group.add(seq)\n if not seq.is_finished():\n self.scheduler.fork_seq(parent, seq)\n\n # Free the finished and selected parent sequences' memory in block\n # manager. Keep them in the sequence group as candidate output.\n for seq, parent in selected_child_seqs:\n if seq is parent and seq.is_finished():\n self.scheduler.free_seq(seq)\n\n # Remove the unselected parent sequences from the sequence group and\n # free their memory in block manager.\n for seq, parent in unselected_child_seqs:\n if seq is parent:\n # Remove the parent sequence if it is not selected for next\n # iteration\n seq_group.remove(seq.seq_id)\n self.scheduler.free_seq(seq)\n\n def _process_model_outputs(\n self, output: SamplerOutput,\n scheduler_outputs: SchedulerOutputs) -> List[RequestOutput]:\n # Update the scheduled sequence groups with the model outputs.\n scheduled_seq_groups = scheduler_outputs.scheduled_seq_groups\n for seq_group, outputs in zip(scheduled_seq_groups, output):\n self._process_sequence_group_outputs(seq_group, outputs)\n\n # Free the finished sequence groups.\n self.scheduler.free_finished_seq_groups()\n\n # Create the outputs.\n request_outputs: List[RequestOutput] = []\n for seq_group in (scheduled_seq_groups +\n scheduler_outputs.ignored_seq_groups):\n request_output = RequestOutput.from_seq_group(seq_group)\n request_outputs.append(request_output)\n\n if self.log_stats:\n # Log the system stats.\n self._log_system_stats(scheduler_outputs.prompt_run,\n scheduler_outputs.num_batched_tokens)\n return request_outputs\n\n def step(self) -> List[RequestOutput]:\n \"\"\"Performs one decoding iteration and returns newly generated results.\n\n This function performs one decoding iteration of the engine. It first\n schedules the sequences to be executed in the next iteration and the\n token blocks to be swapped in/out/copy. Then, it executes the model\n and updates the scheduler with the model outputs. 
Finally, it decodes\n the sequences and returns the newly generated results.\n \"\"\"\n seq_group_metadata_list, scheduler_outputs, ignored = self._schedule()\n if scheduler_outputs.is_empty():\n return ignored\n\n # Execute the model.\n output = self._run_workers(\n \"execute_model\",\n seq_group_metadata_list=seq_group_metadata_list,\n blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,\n blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,\n blocks_to_copy=scheduler_outputs.blocks_to_copy,\n )\n\n return self._process_model_outputs(output, scheduler_outputs)\n\n def _log_system_stats(\n self,\n prompt_run: bool,\n num_batched_tokens: int,\n ) -> None:\n now = time.monotonic()\n # Log the number of batched input tokens.\n if prompt_run:\n self.num_prompt_tokens.append((now, num_batched_tokens))\n else:\n self.num_generation_tokens.append((now, num_batched_tokens))\n\n elapsed_time = now - self.last_logging_time\n if elapsed_time < _LOGGING_INTERVAL_SEC:\n return\n\n # Discard the old stats.\n self.num_prompt_tokens = [(t, n) for t, n in self.num_prompt_tokens\n if now - t < _LOGGING_INTERVAL_SEC]\n self.num_generation_tokens = [(t, n)\n for t, n in self.num_generation_tokens\n if now - t < _LOGGING_INTERVAL_SEC]\n\n if len(self.num_prompt_tokens) > 1:\n total_num_tokens = sum(n for _, n in self.num_prompt_tokens[:-1])\n window = now - self.num_prompt_tokens[0][0]\n avg_prompt_throughput = total_num_tokens / window\n else:\n avg_prompt_throughput = 0.0\n if len(self.num_generation_tokens) > 1:\n total_num_tokens = sum(n\n for _, n in self.num_generation_tokens[:-1])\n window = now - self.num_generation_tokens[0][0]\n avg_generation_throughput = total_num_tokens / window\n else:\n avg_generation_throughput = 0.0\n\n total_num_gpu_blocks = self.cache_config.num_gpu_blocks\n num_free_gpu_blocks = (\n self.scheduler.block_manager.get_num_free_gpu_blocks())\n num_used_gpu_blocks = total_num_gpu_blocks - num_free_gpu_blocks\n gpu_cache_usage = num_used_gpu_blocks / total_num_gpu_blocks\n\n total_num_cpu_blocks = self.cache_config.num_cpu_blocks\n if total_num_cpu_blocks > 0:\n num_free_cpu_blocks = (\n self.scheduler.block_manager.get_num_free_cpu_blocks())\n num_used_cpu_blocks = total_num_cpu_blocks - num_free_cpu_blocks\n cpu_cache_usage = num_used_cpu_blocks / total_num_cpu_blocks\n else:\n cpu_cache_usage = 0.0\n\n logger.info(\"Avg prompt throughput: \"\n f\"{avg_prompt_throughput:.1f} tokens/s, \"\n \"Avg generation throughput: \"\n f\"{avg_generation_throughput:.1f} tokens/s, \"\n f\"Running: {len(self.scheduler.running)} reqs, \"\n f\"Swapped: {len(self.scheduler.swapped)} reqs, \"\n f\"Pending: {len(self.scheduler.waiting)} reqs, \"\n f\"GPU KV cache usage: {gpu_cache_usage * 100:.1f}%, \"\n f\"CPU KV cache usage: {cpu_cache_usage * 100:.1f}%\")\n self.last_logging_time = now\n\n def _decode_sequence(self, seq: Sequence, prms: SamplingParams) -> None:\n \"\"\"Decodes the new token for a sequence.\"\"\"\n (new_tokens, new_output_text, prefix_offset,\n read_offset) = detokenize_incrementally(\n self.tokenizer,\n all_input_ids=seq.get_token_ids(),\n prev_tokens=seq.tokens,\n prefix_offset=seq.prefix_offset,\n read_offset=seq.read_offset,\n skip_special_tokens=prms.skip_special_tokens,\n spaces_between_special_tokens=prms.spaces_between_special_tokens,\n )\n if seq.tokens is None:\n seq.tokens = new_tokens\n else:\n seq.tokens.extend(new_tokens)\n seq.prefix_offset = prefix_offset\n seq.read_offset = read_offset\n seq.output_text += new_output_text\n\n def _check_stop(self, seq: 
Sequence,\n sampling_params: SamplingParams) -> None:\n \"\"\"Stop the finished sequences.\"\"\"\n for stop_str in sampling_params.stop:\n if seq.output_text.endswith(stop_str):\n # Truncate the output text so that the stop string is\n # not included in the output.\n seq.output_text = seq.output_text[:-len(stop_str)]\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n if seq.get_last_token_id() in sampling_params.stop_token_ids:\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n\n # Check if the sequence has reached max_model_len.\n if seq.get_len() > self.scheduler_config.max_model_len:\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n # Check if the sequence has reached max_tokens.\n if seq.get_output_len() == sampling_params.max_tokens:\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n # Check if the sequence has generated the EOS token.\n if ((not sampling_params.ignore_eos)\n and seq.get_last_token_id() == self.tokenizer.eos_token_id):\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n\n def _run_workers(\n self,\n method: str,\n *args,\n get_all_outputs: bool = False,\n **kwargs,\n ) -> Any:\n \"\"\"Runs the given method on all workers.\"\"\"\n all_outputs = []\n for worker in self.workers:\n if self.parallel_config.worker_use_ray:\n executor = partial(worker.execute_method.remote, method)\n else:\n executor = getattr(worker, method)\n\n output = executor(*args, **kwargs)\n all_outputs.append(output)\n\n if self.parallel_config.worker_use_ray:\n all_outputs = ray.get(all_outputs)\n\n if get_all_outputs:\n return all_outputs\n\n # Make sure all workers have the same results.\n output = all_outputs[0]\n for other_output in all_outputs[1:]:\n assert output == other_output\n return output\n", "path": "vllm/engine/llm_engine.py" } ]
diff --git a/tests/test_regression.py b/tests/test_regression.py new file mode 100644 index 00000000000..3bfb2b43f26 --- /dev/null +++ b/tests/test_regression.py @@ -0,0 +1,27 @@ +"""Containing tests that check for regressions in vLLM's behavior. + +It should include tests that are reported by users and making sure they +will never happen again. + +""" +from vllm import LLM, SamplingParams + + +def test_duplicated_ignored_sequence_group(): + """https://github.com/vllm-project/vllm/issues/1655""" + + sampling_params = SamplingParams(temperature=0.01, + top_p=0.1, + max_tokens=256) + llm = LLM(model="facebook/opt-125m", + max_num_batched_tokens=4096, + tensor_parallel_size=1) + prompts = ["This is a short prompt", "This is a very long prompt " * 1000] + outputs = llm.generate(prompts, sampling_params=sampling_params) + + assert len(prompts) == len(outputs) + + +if __name__ == "__main__": + import pytest + pytest.main([__file__]) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index c3752b11f56..20af3fb3e38 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -567,7 +567,7 @@ def step(self) -> List[RequestOutput]: blocks_to_copy=scheduler_outputs.blocks_to_copy, ) - return self._process_model_outputs(output, scheduler_outputs) + ignored + return self._process_model_outputs(output, scheduler_outputs) def _log_system_stats( self,
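For context, the regression test added by this diff references issue #1655 and can be exercised outside pytest too. A minimal sketch, assuming a working vLLM install and downloadable `facebook/opt-125m` weights; it mirrors the test above rather than adding new behaviour:

```python
# Mirrors tests/test_regression.py from the diff above.
# The second prompt is far longer than max_num_batched_tokens, so the
# scheduler ignores it; per issue #1655, ignored sequence groups could
# previously be duplicated in the returned outputs.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m",
          max_num_batched_tokens=4096,
          tensor_parallel_size=1)
params = SamplingParams(temperature=0.01, top_p=0.1, max_tokens=256)

prompts = ["This is a short prompt", "This is a very long prompt " * 1000]
outputs = llm.generate(prompts, sampling_params=params)

# With the one-line fix to LLMEngine.step(), each prompt yields exactly one output.
assert len(outputs) == len(prompts)
```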
buildbot__buildbot-5588
Can't use secret interpolation in p4poller.P4Source and steps.P4

I tried to use secrets for Perforce passwords (using Buildbot 1.0.0). First I tried one in the P4 change source:

```python
p4ChangeSource = p4poller.P4Source(p4port=p4Port,
                                   p4user=p4User,
                                   p4passwd=util.Secret('p4passwd'),
                                   ...)
```

which fails with:

```
... [-] P4 poll failed
...
exceptions.TypeError: Arguments contain a non-string value
```

Then I tried to use the secret in the P4 source checkout step:

```python
factory.addStep(steps.P4(p4port=p4Port,
                         ...,
                         p4passwd=util.Secret('p4passwd'),
                         ...))
```

This fails with:

```
Upon execvpe p4 ['p4', ..., '-P', Unpersistable('Unpersistable data: instance of class buildbot.process.properties.Secret deemed insecure'), ...] in environment id 56699632
:Traceback (most recent call last):
  File "/.../internet/process.py", line 445, in _fork
    environment)
  File "/.../internet/process.py", line 523, in _execChild
    os.execvpe(executable, args, environment)
  File "/.../lib/python2.7/os.py", line 353, in execvpe
    _execvpe(file, args, env)
  File "/.../lib/python2.7/os.py", line 380, in _execvpe
    func(fullname, *argrest)
TypeError: coercing to Unicode: need string or buffer, instance found
```

I suppose the secrets are not yet implemented for the Perforce functions. Or am I doing something wrong?
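For context, a minimal `master.cfg` sketch of the configuration the report describes; the server address, user name, depot paths, and the secrets directory are placeholders, and a secrets provider is assumed to be configured:

```python
# Sketch of the reported setup (placeholder values; assumes a 'p4passwd'
# secret file exists in the configured secrets directory).
from buildbot.plugins import changes, steps, util

c = BuildmasterConfig = {}
c['secretsProviders'] = [util.SecretInAFile(dirname="/path/to/secrets")]

# Change source: in the reported Buildbot version, P4Source validates its
# arguments as plain strings, so passing util.Secret(...) here raises
# "Arguments contain a non-string value" when the poll runs.
c['change_source'] = [changes.P4Source(p4port="perforce:1666",
                                       p4user="buildbot",
                                       p4passwd=util.Secret("p4passwd"))]

# Checkout step: without rendering, the Secret object ends up on the p4
# command line, which is what the execvpe traceback above shows.
f = util.BuildFactory()
f.addStep(steps.P4(p4port="perforce:1666",
                   p4user="buildbot",
                   p4base="//depot",
                   p4branch="main",
                   p4passwd=util.Secret("p4passwd")))
```

The diff at the end of this record adds `'p4passwd'` to `P4.renderables`, so the checkout step renders the Secret into a plain string before the p4 command is built.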
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Portions Copyright 2013 Bad Dog Consulting\n\nimport re\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import interfaces\nfrom buildbot.interfaces import WorkerTooOldError\nfrom buildbot.process import buildstep\nfrom buildbot.process import results\nfrom buildbot.process.properties import Interpolate\nfrom buildbot.steps.source import Source\n\n# Notes:\n# see\n# http://perforce.com/perforce/doc.current/manuals/cmdref/o.gopts.html#1040647\n# for getting p4 command to output marshalled python dictionaries as output\n# for commands.\n# Perhaps switch to using 'p4 -G' : From URL above:\n# -G Causes all output (and batch input for form commands with -i) to be\n# formatted as marshalled Python dictionary objects. This is most often used\n# when scripting.\n\n\nclass P4(Source):\n\n \"\"\"Perform Perforce checkout/update operations.\"\"\"\n\n name = 'p4'\n\n renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch']\n possible_modes = ('incremental', 'full')\n\n def __init__(self, mode='incremental',\n method=None, p4base=None, p4branch=None,\n p4port=None, p4user=None,\n p4passwd=None, p4extra_views=(), p4line_end='local',\n p4viewspec=None, p4viewspec_suffix='...',\n p4client=Interpolate(\n 'buildbot_%(prop:workername)s_%(prop:buildername)s'),\n p4client_spec_options='allwrite rmdir',\n p4extra_args=None,\n p4bin='p4',\n use_tickets=False,\n stream=False,\n debug=False,\n **kwargs):\n self.method = method\n self.mode = mode\n self.p4branch = p4branch\n self.p4bin = p4bin\n self.p4base = p4base\n self.p4port = p4port\n self.p4user = p4user\n self.p4passwd = p4passwd\n self.p4extra_views = p4extra_views\n self.p4viewspec = p4viewspec\n self.p4viewspec_suffix = p4viewspec_suffix\n self.p4line_end = p4line_end\n self.p4client = p4client\n self.p4client_spec_options = p4client_spec_options\n self.p4extra_args = p4extra_args\n self.use_tickets = use_tickets\n self.stream = stream\n self.debug = debug\n\n super().__init__(**kwargs)\n\n if self.mode not in self.possible_modes and \\\n not interfaces.IRenderable.providedBy(self.mode):\n config.error(\"mode {} is not an IRenderable, or one of {}\".format(self.mode,\n self.possible_modes))\n\n if not p4viewspec and p4base is None:\n config.error(\"You must provide p4base or p4viewspec\")\n\n if p4viewspec and (p4base or p4branch or p4extra_views):\n config.error(\n \"Either provide p4viewspec or p4base and p4branch (and optionally p4extra_views\")\n\n if p4viewspec and isinstance(p4viewspec, str):\n config.error(\n \"p4viewspec must not be a string, and should be a sequence of 2 element sequences\")\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and not p4base.startswith('/'):\n config.error('p4base should start with // [p4base = 
{}]'.format(p4base))\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and p4base.endswith('/'):\n config.error('p4base should not end with a trailing / [p4base = {}]'.format(p4base))\n\n if not interfaces.IRenderable.providedBy(p4branch) and p4branch and p4branch.endswith('/'):\n config.error('p4branch should not end with a trailing / [p4branch = {}]'.format(\n p4branch))\n\n if (p4branch or p4extra_views) and not p4base:\n config.error(\n 'If you specify either p4branch or p4extra_views you must also specify p4base')\n\n if stream:\n if (p4extra_views or p4viewspec):\n config.error('You can\\'t use p4extra_views not p4viewspec with stream')\n if not p4base or not p4branch:\n config.error('You must specify both p4base and p4branch when using stream')\n if \" \" in p4base or \" \" in p4branch:\n config.error('p4base and p4branch must not contain any whitespace')\n\n if self.p4client_spec_options is None:\n self.p4client_spec_options = ''\n\n @defer.inlineCallbacks\n def run_vc(self, branch, revision, patch):\n if self.debug:\n log.msg('in run_vc')\n\n self.revision = revision\n self.method = self._getMethod()\n self.stdio_log = yield self.addLogForRemoteCommands(\"stdio\")\n\n installed = yield self.checkP4()\n if not installed:\n raise WorkerTooOldError(\"p4 is not installed on worker\")\n\n # Try to obfuscate the password when used as an argument to commands.\n if self.p4passwd is not None:\n if not self.workerVersionIsOlderThan('shell', '2.16'):\n self.p4passwd_arg = ('obfuscated', self.p4passwd, 'XXXXXX')\n else:\n self.p4passwd_arg = self.p4passwd\n log.msg(\"Worker does not understand obfuscation; \"\n \"p4 password will be logged\")\n\n if self.use_tickets and self.p4passwd:\n yield self._acquireTicket()\n\n yield self._getAttrGroupMember('mode', self.mode)()\n yield self.parseGotRevision()\n return results.SUCCESS\n\n @defer.inlineCallbacks\n def mode_full(self):\n if self.debug:\n log.msg(\"P4:full()..\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # Then p4 sync #none\n yield self._dovccmd(['sync', '#none'])\n\n # Then remove directory.\n yield self.runRmdir(self.workdir)\n\n # Then we need to sync the client\n if self.revision:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s changeset:%d\",\n self._getP4BaseForLog(), int(self.revision))\n yield self._dovccmd(['sync', '{}...@{}'.format(self._getP4BaseForCommand(),\n int(self.revision))], collectStdout=True)\n else:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s no revision\",\n self._getP4BaseForLog())\n yield self._dovccmd(['sync'], collectStdout=True)\n\n if self.debug:\n log.msg(\"P4: full() sync done.\")\n\n @defer.inlineCallbacks\n def mode_incremental(self):\n if self.debug:\n log.msg(\"P4:incremental()\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # and plan to do a checkout\n command = ['sync', ]\n\n if self.revision:\n command.extend(['{}...@{}'.format(self._getP4BaseForCommand(), int(self.revision))])\n\n if self.debug:\n log.msg(\n \"P4:incremental() command:%s revision:%s\", command, self.revision)\n yield self._dovccmd(command)\n\n def _getP4BaseForLog(self):\n return self.p4base or '<custom viewspec>'\n\n def _getP4BaseForCommand(self):\n return self.p4base or ''\n\n def _buildVCCommand(self, doCommand):\n assert doCommand, \"No command specified\"\n\n command = [self.p4bin, ]\n\n if self.p4port:\n command.extend(['-p', self.p4port])\n if self.p4user:\n command.extend(['-u', 
self.p4user])\n if not self.use_tickets and self.p4passwd:\n command.extend(['-P', self.p4passwd_arg])\n if self.p4client:\n command.extend(['-c', self.p4client])\n\n # Only add the extra arguments for the `sync` command.\n if doCommand[0] == 'sync' and self.p4extra_args:\n command.extend(self.p4extra_args)\n\n command.extend(doCommand)\n return command\n\n @defer.inlineCallbacks\n def _dovccmd(self, command, collectStdout=False, initialStdin=None):\n command = self._buildVCCommand(command)\n\n if self.debug:\n log.msg(\"P4:_dovccmd():workdir->{}\".format(self.workdir))\n\n cmd = buildstep.RemoteShellCommand(self.workdir, command,\n env=self.env,\n logEnviron=self.logEnviron,\n timeout=self.timeout,\n collectStdout=collectStdout,\n initialStdin=initialStdin,)\n cmd.useLog(self.stdio_log, False)\n if self.debug:\n log.msg(\"Starting p4 command : p4 {}\".format(\" \".join(command)))\n\n yield self.runCommand(cmd)\n\n if cmd.rc != 0:\n if self.debug:\n log.msg(\"P4:_dovccmd():Source step failed while running command {}\".format(cmd))\n raise buildstep.BuildStepFailed()\n if collectStdout:\n return cmd.stdout\n return cmd.rc\n\n def _getMethod(self):\n if self.method is not None and self.mode != 'incremental':\n return self.method\n elif self.mode == 'incremental':\n return None\n elif self.method is None and self.mode == 'full':\n return 'fresh'\n return None\n\n def _sourcedirIsUpdatable(self):\n # In general you should always be able to write to the directory\n # You just specified as the root of your client\n # So just return.\n # If we find a case where this is no longer true, then this\n # needs to be implemented\n return defer.succeed(True)\n\n @defer.inlineCallbacks\n def _createClientSpec(self):\n builddir = self.getProperty('builddir')\n\n if self.debug:\n log.msg(\"P4:_createClientSpec() builddir:{}\".format(builddir))\n log.msg(\"P4:_createClientSpec() SELF.workdir:{}\".format(self.workdir))\n\n prop_dict = self.getProperties().asDict()\n prop_dict['p4client'] = self.p4client\n\n client_spec = ''\n client_spec += \"Client: {}\\n\\n\".format(self.p4client)\n client_spec += \"Owner: {}\\n\\n\".format(self.p4user)\n client_spec += \"Description:\\n\\tCreated by {}\\n\\n\".format(self.p4user)\n client_spec += \"Root:\\t{}\\n\\n\".format(self.build.path_module.normpath(\n self.build.path_module.join(builddir, self.workdir)))\n client_spec += \"Options:\\t{}\\n\\n\".format(self.p4client_spec_options)\n if self.p4line_end:\n client_spec += \"LineEnd:\\t{}\\n\\n\".format(self.p4line_end)\n else:\n client_spec += \"LineEnd:\\tlocal\\n\\n\"\n\n # Perforce generates the view for stream-associated workspaces\n if self.stream:\n client_spec += \"Stream:\\t{}/{}\\n\".format(self.p4base, self.p4branch)\n else:\n # Setup a view\n client_spec += \"View:\\n\"\n\n def has_whitespace(*args):\n return any([re.search(r'\\s', i) for i in args if i is not None])\n\n if self.p4viewspec:\n # uses only p4viewspec array of tuples to build view\n # If the user specifies a viewspec via an array of tuples then\n # Ignore any specified p4base,p4branch, and/or p4extra_views\n suffix = self.p4viewspec_suffix or ''\n for k, v in self.p4viewspec:\n if self.debug:\n log.msg('P4:_createClientSpec():key:{} value:{}'.format(k, v))\n\n qa = '\"' if has_whitespace(k, suffix) else ''\n qb = '\"' if has_whitespace(self.p4client, v, suffix) else ''\n client_spec += '\\t{}{}{}{} {}//{}/{}{}{}\\n'.format(qa, k, suffix, qa, qb,\n self.p4client, v, suffix, qb)\n else:\n # Uses p4base, p4branch, p4extra_views\n\n qa = '\"' if 
has_whitespace(self.p4base, self.p4branch) else ''\n\n client_spec += \"\\t{}{}\".format(qa, self.p4base)\n\n if self.p4branch:\n client_spec += \"/{}\".format(self.p4branch)\n\n client_spec += \"/...{} \".format(qa)\n\n qb = '\"' if has_whitespace(self.p4client) else ''\n client_spec += \"{}//{}/...{}\\n\".format(qb, self.p4client, qb)\n\n if self.p4extra_views:\n for k, v in self.p4extra_views:\n qa = '\"' if has_whitespace(k) else ''\n qb = '\"' if has_whitespace(k, self.p4client, v) else ''\n\n client_spec += \"\\t{}{}/...{} {}//{}/{}/...{}\\n\".format(qa, k, qa, qb,\n self.p4client, v, qb)\n\n if self.debug:\n log.msg(client_spec)\n\n stdout = yield self._dovccmd(['client', '-i'], collectStdout=True, initialStdin=client_spec)\n mo = re.search(r'Client (\\S+) (.+)$', stdout, re.M)\n return mo and (mo.group(2) == 'saved.' or mo.group(2) == 'not changed.')\n\n @defer.inlineCallbacks\n def _acquireTicket(self):\n if self.debug:\n log.msg(\"P4:acquireTicket()\")\n\n # TODO: check first if the ticket is still valid?\n initialStdin = self.p4passwd + \"\\n\"\n yield self._dovccmd(['login'], initialStdin=initialStdin)\n\n @defer.inlineCallbacks\n def parseGotRevision(self):\n command = self._buildVCCommand(['changes', '-m1', '#have'])\n\n cmd = buildstep.RemoteShellCommand(self.workdir, command,\n env=self.env,\n timeout=self.timeout,\n logEnviron=self.logEnviron,\n collectStdout=True)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n\n stdout = cmd.stdout.strip()\n # Example output from p4 changes -m1 #have\n # Change 212798 on 2012/04/13 by user@user-unix-bldng2 'change to\n # pickup build'\n revision = stdout.split()[1]\n try:\n int(revision)\n except ValueError as e:\n msg = ((\"p4.parseGotRevision unable to parse output \"\n \"of 'p4 changes -m1 \\\"#have\\\"': '{}'\").format(stdout))\n log.msg(msg)\n raise buildstep.BuildStepFailed() from e\n\n if self.debug:\n log.msg(\"Got p4 revision {}\".format(revision))\n self.updateSourceProperty('got_revision', revision)\n\n @defer.inlineCallbacks\n def purge(self, ignore_ignores):\n \"\"\"Delete everything that shown up on status.\"\"\"\n command = ['sync', '#none']\n if ignore_ignores:\n command.append('--no-ignore')\n yield self._dovccmd(command, collectStdout=True)\n # FIXME: do the following comments need addressing?\n # add deferred to rm tree\n # then add defer to sync to revision\n\n @defer.inlineCallbacks\n def checkP4(self):\n cmd = buildstep.RemoteShellCommand(self.workdir, ['p4', '-V'],\n env=self.env,\n logEnviron=self.logEnviron)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n return cmd.rc == 0\n\n def computeSourceRevision(self, changes):\n if not changes or None in [c.revision for c in changes]:\n return None\n lastChange = max([int(c.revision) for c in changes])\n return lastChange\n", "path": "master/buildbot/steps/source/p4.py" } ]
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Portions Copyright 2013 Bad Dog Consulting\n\nimport re\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot import interfaces\nfrom buildbot.interfaces import WorkerTooOldError\nfrom buildbot.process import buildstep\nfrom buildbot.process import results\nfrom buildbot.process.properties import Interpolate\nfrom buildbot.steps.source import Source\n\n# Notes:\n# see\n# http://perforce.com/perforce/doc.current/manuals/cmdref/o.gopts.html#1040647\n# for getting p4 command to output marshalled python dictionaries as output\n# for commands.\n# Perhaps switch to using 'p4 -G' : From URL above:\n# -G Causes all output (and batch input for form commands with -i) to be\n# formatted as marshalled Python dictionary objects. This is most often used\n# when scripting.\n\n\nclass P4(Source):\n\n \"\"\"Perform Perforce checkout/update operations.\"\"\"\n\n name = 'p4'\n\n renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd']\n possible_modes = ('incremental', 'full')\n\n def __init__(self, mode='incremental',\n method=None, p4base=None, p4branch=None,\n p4port=None, p4user=None,\n p4passwd=None, p4extra_views=(), p4line_end='local',\n p4viewspec=None, p4viewspec_suffix='...',\n p4client=Interpolate(\n 'buildbot_%(prop:workername)s_%(prop:buildername)s'),\n p4client_spec_options='allwrite rmdir',\n p4extra_args=None,\n p4bin='p4',\n use_tickets=False,\n stream=False,\n debug=False,\n **kwargs):\n self.method = method\n self.mode = mode\n self.p4branch = p4branch\n self.p4bin = p4bin\n self.p4base = p4base\n self.p4port = p4port\n self.p4user = p4user\n self.p4passwd = p4passwd\n self.p4extra_views = p4extra_views\n self.p4viewspec = p4viewspec\n self.p4viewspec_suffix = p4viewspec_suffix\n self.p4line_end = p4line_end\n self.p4client = p4client\n self.p4client_spec_options = p4client_spec_options\n self.p4extra_args = p4extra_args\n self.use_tickets = use_tickets\n self.stream = stream\n self.debug = debug\n\n super().__init__(**kwargs)\n\n if self.mode not in self.possible_modes and \\\n not interfaces.IRenderable.providedBy(self.mode):\n config.error(\"mode {} is not an IRenderable, or one of {}\".format(self.mode,\n self.possible_modes))\n\n if not p4viewspec and p4base is None:\n config.error(\"You must provide p4base or p4viewspec\")\n\n if p4viewspec and (p4base or p4branch or p4extra_views):\n config.error(\n \"Either provide p4viewspec or p4base and p4branch (and optionally p4extra_views\")\n\n if p4viewspec and isinstance(p4viewspec, str):\n config.error(\n \"p4viewspec must not be a string, and should be a sequence of 2 element sequences\")\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and not p4base.startswith('/'):\n config.error('p4base should start with // 
[p4base = {}]'.format(p4base))\n\n if not interfaces.IRenderable.providedBy(p4base) and p4base and p4base.endswith('/'):\n config.error('p4base should not end with a trailing / [p4base = {}]'.format(p4base))\n\n if not interfaces.IRenderable.providedBy(p4branch) and p4branch and p4branch.endswith('/'):\n config.error('p4branch should not end with a trailing / [p4branch = {}]'.format(\n p4branch))\n\n if (p4branch or p4extra_views) and not p4base:\n config.error(\n 'If you specify either p4branch or p4extra_views you must also specify p4base')\n\n if stream:\n if (p4extra_views or p4viewspec):\n config.error('You can\\'t use p4extra_views not p4viewspec with stream')\n if not p4base or not p4branch:\n config.error('You must specify both p4base and p4branch when using stream')\n if \" \" in p4base or \" \" in p4branch:\n config.error('p4base and p4branch must not contain any whitespace')\n\n if self.p4client_spec_options is None:\n self.p4client_spec_options = ''\n\n @defer.inlineCallbacks\n def run_vc(self, branch, revision, patch):\n if self.debug:\n log.msg('in run_vc')\n\n self.revision = revision\n self.method = self._getMethod()\n self.stdio_log = yield self.addLogForRemoteCommands(\"stdio\")\n\n installed = yield self.checkP4()\n if not installed:\n raise WorkerTooOldError(\"p4 is not installed on worker\")\n\n # Try to obfuscate the password when used as an argument to commands.\n if self.p4passwd is not None:\n if not self.workerVersionIsOlderThan('shell', '2.16'):\n self.p4passwd_arg = ('obfuscated', self.p4passwd, 'XXXXXX')\n else:\n self.p4passwd_arg = self.p4passwd\n log.msg(\"Worker does not understand obfuscation; \"\n \"p4 password will be logged\")\n\n if self.use_tickets and self.p4passwd:\n yield self._acquireTicket()\n\n yield self._getAttrGroupMember('mode', self.mode)()\n yield self.parseGotRevision()\n return results.SUCCESS\n\n @defer.inlineCallbacks\n def mode_full(self):\n if self.debug:\n log.msg(\"P4:full()..\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # Then p4 sync #none\n yield self._dovccmd(['sync', '#none'])\n\n # Then remove directory.\n yield self.runRmdir(self.workdir)\n\n # Then we need to sync the client\n if self.revision:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s changeset:%d\",\n self._getP4BaseForLog(), int(self.revision))\n yield self._dovccmd(['sync', '{}...@{}'.format(self._getP4BaseForCommand(),\n int(self.revision))], collectStdout=True)\n else:\n if self.debug:\n log.msg(\"P4: full() sync command based on :base:%s no revision\",\n self._getP4BaseForLog())\n yield self._dovccmd(['sync'], collectStdout=True)\n\n if self.debug:\n log.msg(\"P4: full() sync done.\")\n\n @defer.inlineCallbacks\n def mode_incremental(self):\n if self.debug:\n log.msg(\"P4:incremental()\")\n\n # First we need to create the client\n yield self._createClientSpec()\n\n # and plan to do a checkout\n command = ['sync', ]\n\n if self.revision:\n command.extend(['{}...@{}'.format(self._getP4BaseForCommand(), int(self.revision))])\n\n if self.debug:\n log.msg(\n \"P4:incremental() command:%s revision:%s\", command, self.revision)\n yield self._dovccmd(command)\n\n def _getP4BaseForLog(self):\n return self.p4base or '<custom viewspec>'\n\n def _getP4BaseForCommand(self):\n return self.p4base or ''\n\n def _buildVCCommand(self, doCommand):\n assert doCommand, \"No command specified\"\n\n command = [self.p4bin, ]\n\n if self.p4port:\n command.extend(['-p', self.p4port])\n if self.p4user:\n command.extend(['-u', 
self.p4user])\n if not self.use_tickets and self.p4passwd:\n command.extend(['-P', self.p4passwd_arg])\n if self.p4client:\n command.extend(['-c', self.p4client])\n\n # Only add the extra arguments for the `sync` command.\n if doCommand[0] == 'sync' and self.p4extra_args:\n command.extend(self.p4extra_args)\n\n command.extend(doCommand)\n return command\n\n @defer.inlineCallbacks\n def _dovccmd(self, command, collectStdout=False, initialStdin=None):\n command = self._buildVCCommand(command)\n\n if self.debug:\n log.msg(\"P4:_dovccmd():workdir->{}\".format(self.workdir))\n\n cmd = buildstep.RemoteShellCommand(self.workdir, command,\n env=self.env,\n logEnviron=self.logEnviron,\n timeout=self.timeout,\n collectStdout=collectStdout,\n initialStdin=initialStdin,)\n cmd.useLog(self.stdio_log, False)\n if self.debug:\n log.msg(\"Starting p4 command : p4 {}\".format(\" \".join(command)))\n\n yield self.runCommand(cmd)\n\n if cmd.rc != 0:\n if self.debug:\n log.msg(\"P4:_dovccmd():Source step failed while running command {}\".format(cmd))\n raise buildstep.BuildStepFailed()\n if collectStdout:\n return cmd.stdout\n return cmd.rc\n\n def _getMethod(self):\n if self.method is not None and self.mode != 'incremental':\n return self.method\n elif self.mode == 'incremental':\n return None\n elif self.method is None and self.mode == 'full':\n return 'fresh'\n return None\n\n def _sourcedirIsUpdatable(self):\n # In general you should always be able to write to the directory\n # You just specified as the root of your client\n # So just return.\n # If we find a case where this is no longer true, then this\n # needs to be implemented\n return defer.succeed(True)\n\n @defer.inlineCallbacks\n def _createClientSpec(self):\n builddir = self.getProperty('builddir')\n\n if self.debug:\n log.msg(\"P4:_createClientSpec() builddir:{}\".format(builddir))\n log.msg(\"P4:_createClientSpec() SELF.workdir:{}\".format(self.workdir))\n\n prop_dict = self.getProperties().asDict()\n prop_dict['p4client'] = self.p4client\n\n client_spec = ''\n client_spec += \"Client: {}\\n\\n\".format(self.p4client)\n client_spec += \"Owner: {}\\n\\n\".format(self.p4user)\n client_spec += \"Description:\\n\\tCreated by {}\\n\\n\".format(self.p4user)\n client_spec += \"Root:\\t{}\\n\\n\".format(self.build.path_module.normpath(\n self.build.path_module.join(builddir, self.workdir)))\n client_spec += \"Options:\\t{}\\n\\n\".format(self.p4client_spec_options)\n if self.p4line_end:\n client_spec += \"LineEnd:\\t{}\\n\\n\".format(self.p4line_end)\n else:\n client_spec += \"LineEnd:\\tlocal\\n\\n\"\n\n # Perforce generates the view for stream-associated workspaces\n if self.stream:\n client_spec += \"Stream:\\t{}/{}\\n\".format(self.p4base, self.p4branch)\n else:\n # Setup a view\n client_spec += \"View:\\n\"\n\n def has_whitespace(*args):\n return any([re.search(r'\\s', i) for i in args if i is not None])\n\n if self.p4viewspec:\n # uses only p4viewspec array of tuples to build view\n # If the user specifies a viewspec via an array of tuples then\n # Ignore any specified p4base,p4branch, and/or p4extra_views\n suffix = self.p4viewspec_suffix or ''\n for k, v in self.p4viewspec:\n if self.debug:\n log.msg('P4:_createClientSpec():key:{} value:{}'.format(k, v))\n\n qa = '\"' if has_whitespace(k, suffix) else ''\n qb = '\"' if has_whitespace(self.p4client, v, suffix) else ''\n client_spec += '\\t{}{}{}{} {}//{}/{}{}{}\\n'.format(qa, k, suffix, qa, qb,\n self.p4client, v, suffix, qb)\n else:\n # Uses p4base, p4branch, p4extra_views\n\n qa = '\"' if 
has_whitespace(self.p4base, self.p4branch) else ''\n\n client_spec += \"\\t{}{}\".format(qa, self.p4base)\n\n if self.p4branch:\n client_spec += \"/{}\".format(self.p4branch)\n\n client_spec += \"/...{} \".format(qa)\n\n qb = '\"' if has_whitespace(self.p4client) else ''\n client_spec += \"{}//{}/...{}\\n\".format(qb, self.p4client, qb)\n\n if self.p4extra_views:\n for k, v in self.p4extra_views:\n qa = '\"' if has_whitespace(k) else ''\n qb = '\"' if has_whitespace(k, self.p4client, v) else ''\n\n client_spec += \"\\t{}{}/...{} {}//{}/{}/...{}\\n\".format(qa, k, qa, qb,\n self.p4client, v, qb)\n\n if self.debug:\n log.msg(client_spec)\n\n stdout = yield self._dovccmd(['client', '-i'], collectStdout=True, initialStdin=client_spec)\n mo = re.search(r'Client (\\S+) (.+)$', stdout, re.M)\n return mo and (mo.group(2) == 'saved.' or mo.group(2) == 'not changed.')\n\n @defer.inlineCallbacks\n def _acquireTicket(self):\n if self.debug:\n log.msg(\"P4:acquireTicket()\")\n\n # TODO: check first if the ticket is still valid?\n initialStdin = self.p4passwd + \"\\n\"\n yield self._dovccmd(['login'], initialStdin=initialStdin)\n\n @defer.inlineCallbacks\n def parseGotRevision(self):\n command = self._buildVCCommand(['changes', '-m1', '#have'])\n\n cmd = buildstep.RemoteShellCommand(self.workdir, command,\n env=self.env,\n timeout=self.timeout,\n logEnviron=self.logEnviron,\n collectStdout=True)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n\n stdout = cmd.stdout.strip()\n # Example output from p4 changes -m1 #have\n # Change 212798 on 2012/04/13 by user@user-unix-bldng2 'change to\n # pickup build'\n revision = stdout.split()[1]\n try:\n int(revision)\n except ValueError as e:\n msg = ((\"p4.parseGotRevision unable to parse output \"\n \"of 'p4 changes -m1 \\\"#have\\\"': '{}'\").format(stdout))\n log.msg(msg)\n raise buildstep.BuildStepFailed() from e\n\n if self.debug:\n log.msg(\"Got p4 revision {}\".format(revision))\n self.updateSourceProperty('got_revision', revision)\n\n @defer.inlineCallbacks\n def purge(self, ignore_ignores):\n \"\"\"Delete everything that shown up on status.\"\"\"\n command = ['sync', '#none']\n if ignore_ignores:\n command.append('--no-ignore')\n yield self._dovccmd(command, collectStdout=True)\n # FIXME: do the following comments need addressing?\n # add deferred to rm tree\n # then add defer to sync to revision\n\n @defer.inlineCallbacks\n def checkP4(self):\n cmd = buildstep.RemoteShellCommand(self.workdir, ['p4', '-V'],\n env=self.env,\n logEnviron=self.logEnviron)\n cmd.useLog(self.stdio_log, False)\n yield self.runCommand(cmd)\n return cmd.rc == 0\n\n def computeSourceRevision(self, changes):\n if not changes or None in [c.revision for c in changes]:\n return None\n lastChange = max([int(c.revision) for c in changes])\n return lastChange\n", "path": "master/buildbot/steps/source/p4.py" } ]
diff --git a/master/buildbot/steps/source/p4.py b/master/buildbot/steps/source/p4.py index b29e376dcbbf..5fdddd8abcb9 100644 --- a/master/buildbot/steps/source/p4.py +++ b/master/buildbot/steps/source/p4.py @@ -44,7 +44,7 @@ class P4(Source): name = 'p4' - renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch'] + renderables = ['mode', 'p4base', 'p4client', 'p4viewspec', 'p4branch', 'p4passwd'] possible_modes = ('incremental', 'full') def __init__(self, mode='incremental',
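Why the one-word change is enough for the step: Buildbot renders every attribute listed in a step's `renderables` against the build before `run_vc` executes, so once `'p4passwd'` is in that list, `util.Secret('p4passwd')` has already been resolved to a plain string by the time `_buildVCCommand` appends `-P` (and, on workers new enough to understand obfuscation, the value is wrapped so logs show `XXXXXX`). A toy illustration with hypothetical values, mirroring `run_vc`/`_buildVCCommand` in the file above:

```python
# Hypothetical values only: what the step holds after rendering the Secret.
rendered_passwd = "s3cret"                                # plain string after rendering
p4passwd_arg = ("obfuscated", rendered_passwd, "XXXXXX")  # logged as XXXXXX, sent as s3cret
command = ["p4", "-p", "perforce:1666", "-u", "buildbot", "-P", p4passwd_arg, "sync"]
```

Note that this diff only touches the checkout step (`master/buildbot/steps/source/p4.py`); the first failure in the report, passing `util.Secret` to `p4poller.P4Source`, is not addressed by it.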
internetarchive__openlibrary-5899
Work search - Sort by First Published displays works with no publication date first

When searching for a work and sorting by `First Published`, works that match the search but have no first publication date appear first. I expected to see works with a known first publication year first, not pages later.

### Relevant url?
For example: https://openlibrary.org/search?q=calamity&mode=everything&sort=old

### Steps to Reproduce
1. Search for `calamity` in the search bar (https://openlibrary.org/search?q=calamity&mode=everything)
2. Sort by `First Published` (https://openlibrary.org/search?q=calamity&mode=everything&sort=old)

* Actual: the first result is `The Mount Rushmore Calamity` by Jeff Brown, which has no publication date
* Expected: `A theatre wherein be represented as wel the miseries & calamities that follow the voluptuous worldlings` by Noot, Jan van der, first published in 1569 but only displayed on page 2 of the search

### Details
- **Logged in (Y/N)?** No
- **Environment (prod/dev/local)?** prod

### Proposal & Constraints
When sorting by publication date (`First Published` or `Most Recent`), works with no publication date should be the last results displayed.
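One way to implement the proposal, sketched here and not necessarily the fix that was merged: Solr can sort on a function query, so the `SORTS` map in the worksearch code below could substitute a sentinel year when `first_publish_year` is missing, pushing undated works to the end for both date sorts. This assumes function-query sorting is usable on that field in Open Library's Solr schema:

```python
# Sketch only: alternative entries for the SORTS map in
# openlibrary/plugins/worksearch/code.py (not necessarily the merged fix).
SORTS_DATE_SKETCH = {
    # Oldest first: works without a first_publish_year get 9999 and sink to the end.
    'old': 'def(first_publish_year, 9999) asc',
    # Newest first: works without a first_publish_year get 0 and sink to the end.
    'new': 'def(first_publish_year, 0) desc',
}
```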
[ { "content": "from datetime import datetime\nimport copy\nimport json\nimport logging\nimport random\nimport re\nimport string\nfrom typing import List, Tuple, Any, Union, Optional, Iterable, Dict\nfrom unicodedata import normalize\nfrom json import JSONDecodeError\nimport requests\nimport web\nfrom lxml.etree import XML, XMLSyntaxError\nfrom requests import Response\nfrom six.moves import urllib\n\nfrom infogami import config\nfrom infogami.utils import delegate, stats\nfrom infogami.utils.view import public, render, render_template, safeint\nfrom openlibrary.core.lending import add_availability, get_availability_of_ocaids\nfrom openlibrary.core.models import Edition # noqa: E402\nfrom openlibrary.plugins.inside.code import fulltext_search\nfrom openlibrary.plugins.openlibrary.lists import get_list_editions\nfrom openlibrary.plugins.openlibrary.processors import urlsafe\nfrom openlibrary.plugins.upstream.utils import urlencode\nfrom openlibrary.utils import escape_bracket\nfrom openlibrary.utils.ddc import (\n normalize_ddc,\n normalize_ddc_prefix,\n normalize_ddc_range,\n)\nfrom openlibrary.utils.isbn import normalize_isbn\nfrom openlibrary.utils.lcc import (\n normalize_lcc_prefix,\n normalize_lcc_range,\n short_lcc_to_sortable_lcc,\n)\n\nlogger = logging.getLogger(\"openlibrary.worksearch\")\n\nif hasattr(config, 'plugin_worksearch'):\n solr_select_url = (\n config.plugin_worksearch.get('solr_base_url', 'localhost') + '/select'\n )\n\n default_spellcheck_count = config.plugin_worksearch.get('spellcheck_count', 10)\n\n\nALL_FIELDS = [\n \"key\",\n \"redirects\",\n \"title\",\n \"subtitle\",\n \"alternative_title\",\n \"alternative_subtitle\",\n \"edition_key\",\n \"by_statement\",\n \"publish_date\",\n \"lccn\",\n \"ia\",\n \"oclc\",\n \"isbn\",\n \"contributor\",\n \"publish_place\",\n \"publisher\",\n \"first_sentence\",\n \"author_key\",\n \"author_name\",\n \"author_alternative_name\",\n \"subject\",\n \"person\",\n \"place\",\n \"time\",\n \"has_fulltext\",\n \"title_suggest\",\n \"edition_count\",\n \"publish_year\",\n \"language\",\n \"number_of_pages\",\n \"ia_count\",\n \"publisher_facet\",\n \"author_facet\",\n \"first_publish_year\",\n # Subjects\n \"subject_key\",\n \"person_key\",\n \"place_key\",\n \"time_key\",\n # Classifications\n \"lcc\",\n \"ddc\",\n \"lcc_sort\",\n \"ddc_sort\",\n]\nFACET_FIELDS = [\n \"has_fulltext\",\n \"author_facet\",\n \"language\",\n \"first_publish_year\",\n \"publisher_facet\",\n \"subject_facet\",\n \"person_facet\",\n \"place_facet\",\n \"time_facet\",\n \"public_scan_b\",\n]\nFIELD_NAME_MAP = {\n 'author': 'author_name',\n 'authors': 'author_name',\n 'by': 'author_name',\n 'publishers': 'publisher',\n # \"Private\" fields\n # This is private because we'll change it to a multi-valued field instead of a\n # plain string at the next opportunity, which will make it much more usable.\n '_ia_collection': 'ia_collection_s',\n}\nSORTS = {\n 'editions': 'edition_count desc',\n 'old': 'first_publish_year asc',\n 'new': 'first_publish_year desc',\n 'scans': 'ia_count desc',\n # Classifications\n 'lcc_sort': 'lcc_sort asc',\n 'lcc_sort asc': 'lcc_sort asc',\n 'lcc_sort desc': 'lcc_sort desc',\n 'ddc_sort': 'ddc_sort asc',\n 'ddc_sort asc': 'ddc_sort asc',\n 'ddc_sort desc': 'ddc_sort desc',\n # Random\n 'random': 'random_1 asc',\n 'random asc': 'random_1 asc',\n 'random desc': 'random_1 desc',\n 'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',\n 'random.daily': lambda: f'random_{datetime.now():%Y%m%d} 
asc',\n}\nDEFAULT_SEARCH_FIELDS = {\n 'key',\n 'author_name',\n 'author_key',\n 'title',\n 'subtitle',\n 'edition_count',\n 'ia',\n 'has_fulltext',\n 'first_publish_year',\n 'cover_i',\n 'cover_edition_key',\n 'public_scan_b',\n 'lending_edition_s',\n 'lending_identifier_s',\n 'language',\n 'ia_collection_s',\n # FIXME: These should be fetched from book_providers, but can't cause circular dep\n 'id_project_gutenberg',\n 'id_librivox',\n 'id_standard_ebooks',\n}\nOLID_URLS = {'A': 'authors', 'M': 'books', 'W': 'works'}\n\nre_to_esc = re.compile(r'[\\[\\]:/]')\nre_isbn_field = re.compile(r'^\\s*(?:isbn[:\\s]*)?([-0-9X]{9,})\\s*$', re.I)\nre_author_key = re.compile(r'(OL\\d+A)')\nre_fields = re.compile(r'(-?%s):' % '|'.join(ALL_FIELDS + list(FIELD_NAME_MAP)), re.I)\nre_op = re.compile(' +(OR|AND)$')\nre_range = re.compile(r'\\[(?P<start>.*) TO (?P<end>.*)\\]')\nre_author_facet = re.compile(r'^(OL\\d+A) (.*)$')\nre_pre = re.compile(r'<pre>(.*)</pre>', re.S)\nre_subject_types = re.compile('^(places|times|people)/(.*)')\nre_olid = re.compile(r'^OL\\d+([AMW])$')\n\nplurals = {f + 's': f for f in ('publisher', 'author')}\n\n\n@public\ndef get_solr_works(work_key: Iterable[str]) -> dict[str, dict]:\n from openlibrary.plugins.worksearch.search import get_solr\n\n return {\n doc['key']: doc\n for doc in get_solr().get_many(set(work_key), fields=DEFAULT_SEARCH_FIELDS)\n }\n\n\ndef process_sort(raw_sort):\n \"\"\"\n :param str raw_sort:\n :rtype: str\n\n >>> process_sort('editions')\n 'edition_count desc'\n >>> process_sort('editions, new')\n 'edition_count desc,first_publish_year desc'\n >>> process_sort('random')\n 'random_1 asc'\n >>> process_sort('random_custom_seed')\n 'random_custom_seed asc'\n >>> process_sort('random_custom_seed desc')\n 'random_custom_seed desc'\n >>> process_sort('random_custom_seed asc')\n 'random_custom_seed asc'\n \"\"\"\n\n def process_individual_sort(sort):\n if sort.startswith('random_'):\n return sort if ' ' in sort else sort + ' asc'\n else:\n solr_sort = SORTS[sort]\n return solr_sort() if callable(solr_sort) else solr_sort\n\n return ','.join(process_individual_sort(s.strip()) for s in raw_sort.split(','))\n\n\ndef read_author_facet(af):\n # example input: \"OL26783A Leo Tolstoy\"\n return re_author_facet.match(af).groups()\n\n\ndef get_language_name(code):\n lang = web.ctx.site.get('/languages/' + code)\n return lang.name if lang else \"'%s' unknown\" % code\n\n\ndef read_facets(root):\n e_facet_counts = root.find(\"lst[@name='facet_counts']\")\n e_facet_fields = e_facet_counts.find(\"lst[@name='facet_fields']\")\n facets = {}\n for e_lst in e_facet_fields:\n assert e_lst.tag == 'lst'\n name = e_lst.attrib['name']\n if name == 'author_facet':\n name = 'author_key'\n if name == 'has_fulltext': # boolean facets\n e_true = e_lst.find(\"int[@name='true']\")\n true_count = e_true.text if e_true is not None else 0\n e_false = e_lst.find(\"int[@name='false']\")\n false_count = e_false.text if e_false is not None else 0\n facets[name] = [\n ('true', 'yes', true_count),\n ('false', 'no', false_count),\n ]\n continue\n facets[name] = []\n for e in e_lst:\n if e.text == '0':\n continue\n k = e.attrib['name']\n if name == 'author_key':\n k, display = read_author_facet(k)\n elif name == 'language':\n display = get_language_name(k)\n else:\n display = k\n facets[name].append((k, display, e.text))\n return facets\n\n\ndef lcc_transform(raw):\n \"\"\"\n Transform the lcc search field value\n :param str raw:\n :rtype: str\n \"\"\"\n # e.g. 
lcc:[NC1 TO NC1000] to lcc:[NC-0001.00000000 TO NC-1000.00000000]\n # for proper range search\n m = re_range.match(raw)\n if m:\n lcc_range = [m.group('start').strip(), m.group('end').strip()]\n normed = normalize_lcc_range(*lcc_range)\n return f'[{normed[0] or lcc_range[0]} TO {normed[1] or lcc_range[1]}]'\n elif '*' in raw and not raw.startswith('*'):\n # Marshals human repr into solr repr\n # lcc:A720* should become A--0720*\n parts = raw.split('*', 1)\n lcc_prefix = normalize_lcc_prefix(parts[0])\n return (lcc_prefix or parts[0]) + '*' + parts[1]\n else:\n normed = short_lcc_to_sortable_lcc(raw.strip('\"'))\n if normed:\n use_quotes = ' ' in normed or raw.startswith('\"')\n return ('\"%s\"' if use_quotes else '%s*') % normed\n\n # If none of the transforms took\n return raw\n\n\ndef ddc_transform(raw):\n \"\"\"\n Transform the ddc search field value\n :param str raw:\n :rtype: str\n \"\"\"\n m = re_range.match(raw)\n if m:\n raw = [m.group('start').strip(), m.group('end').strip()]\n normed = normalize_ddc_range(*raw)\n return f'[{normed[0] or raw[0]} TO {normed[1] or raw[1]}]'\n elif raw.endswith('*'):\n return normalize_ddc_prefix(raw[:-1]) + '*'\n else:\n normed = normalize_ddc(raw.strip('\"'))\n if normed:\n return normed[0]\n\n # if none of the transforms took\n return raw\n\n\ndef ia_collection_s_transform(raw):\n \"\"\"\n Because this field is not a multi-valued field in solr, but a simple ;-separate\n string, we have to do searches like this for now.\n \"\"\"\n result = raw\n if not result.startswith('*'):\n result = '*' + result\n if not result.endswith('*'):\n result += '*'\n return result\n\n\ndef parse_query_fields(q):\n found = [(m.start(), m.end()) for m in re_fields.finditer(q)]\n first = q[: found[0][0]].strip() if found else q.strip()\n if first:\n yield {'field': 'text', 'value': first.replace(':', r'\\:')}\n for field_num in range(len(found)):\n op_found = None\n f = found[field_num]\n field_name = q[f[0] : f[1] - 1].lower()\n if field_name in FIELD_NAME_MAP:\n field_name = FIELD_NAME_MAP[field_name]\n if field_num == len(found) - 1:\n v = q[f[1] :].strip()\n else:\n v = q[f[1] : found[field_num + 1][0]].strip()\n m = re_op.search(v)\n if m:\n v = v[: -len(m.group(0))]\n op_found = m.group(1)\n if field_name == 'isbn':\n isbn = normalize_isbn(v)\n if isbn:\n v = isbn\n if field_name in ('lcc', 'lcc_sort'):\n v = lcc_transform(v)\n if field_name == ('ddc', 'ddc_sort'):\n v = ddc_transform(v)\n if field_name == 'ia_collection_s':\n v = ia_collection_s_transform(v)\n\n yield {'field': field_name, 'value': v.replace(':', r'\\:')}\n if op_found:\n yield {'op': op_found}\n\n\ndef build_q_list(param):\n q_list = []\n if 'q' in param:\n # Solr 4+ has support for regexes (eg `key:/foo.*/`)! But for now, let's not\n # expose that and escape all '/'. 
Otherwise `key:/works/OL1W` is interpreted as\n # a regex.\n q_param = param['q'].strip().replace('/', '\\\\/')\n else:\n q_param = None\n use_dismax = False\n if q_param:\n if q_param == '*:*':\n q_list.append(q_param)\n elif 'NOT ' in q_param: # this is a hack\n q_list.append(q_param.strip())\n elif re_fields.search(q_param):\n q_list.extend(\n i['op'] if 'op' in i else '{}:({})'.format(i['field'], i['value'])\n for i in parse_query_fields(q_param)\n )\n else:\n isbn = normalize_isbn(q_param)\n if isbn and len(isbn) in (10, 13):\n q_list.append('isbn:(%s)' % isbn)\n else:\n q_list.append(q_param.strip().replace(':', r'\\:'))\n use_dismax = True\n else:\n if 'author' in param:\n v = param['author'].strip()\n m = re_author_key.search(v)\n if m:\n q_list.append(\"author_key:(%s)\" % m.group(1))\n else:\n v = re_to_esc.sub(r'\\\\\\g<0>', v)\n # Somehow v can be empty at this point,\n # passing the following with empty strings causes a severe error in SOLR\n if v:\n q_list.append(\n \"(author_name:({name}) OR author_alternative_name:({name}))\".format(\n name=v\n )\n )\n\n check_params = [\n 'title',\n 'publisher',\n 'oclc',\n 'lccn',\n 'contributor',\n 'subject',\n 'place',\n 'person',\n 'time',\n ]\n q_list += [\n '{}:({})'.format(k, re_to_esc.sub(r'\\\\\\g<0>', param[k]))\n for k in check_params\n if k in param\n ]\n if param.get('isbn'):\n q_list.append(\n 'isbn:(%s)' % (normalize_isbn(param['isbn']) or param['isbn'])\n )\n return (q_list, use_dismax)\n\n\ndef execute_solr_query(\n solr_path: str, params: Union[dict, list[tuple[str, Any]]]\n) -> Optional[Response]:\n stats.begin(\"solr\", url=f'{solr_path}?{urlencode(params)}')\n try:\n response = requests.get(solr_path, params=params, timeout=10)\n response.raise_for_status()\n except requests.HTTPError:\n logger.exception(\"Failed solr query\")\n return None\n finally:\n stats.end()\n return response\n\n\ndef parse_json_from_solr_query(\n solr_path: str, params: Union[dict, list[tuple[str, Any]]]\n) -> Optional[dict]:\n \"\"\"\n Returns a json.loaded Python object or None\n \"\"\"\n response = execute_solr_query(solr_path, params)\n if not response:\n logger.error(\"Error parsing empty search engine response\")\n return None\n try:\n return response.json()\n except JSONDecodeError:\n logger.exception(\"Error parsing search engine response\")\n return None\n\n\ndef run_solr_query(\n param=None,\n rows=100,\n page=1,\n sort=None,\n spellcheck_count=None,\n offset=None,\n fields=None,\n facet=True,\n):\n param = param or {}\n\n # use page when offset is not specified\n if offset is None:\n offset = rows * (page - 1)\n\n (q_list, use_dismax) = build_q_list(param)\n params = [\n ('fl', ','.join(fields or DEFAULT_SEARCH_FIELDS)),\n ('fq', 'type:work'),\n ('q.op', 'AND'),\n ('start', offset),\n ('rows', rows),\n ]\n\n if spellcheck_count is None:\n spellcheck_count = default_spellcheck_count\n\n if spellcheck_count:\n params.append(('spellcheck', 'true'))\n params.append(('spellcheck.count', spellcheck_count))\n\n if facet:\n params.append(('facet', 'true'))\n for facet in FACET_FIELDS:\n params.append(('facet.field', facet))\n\n if q_list:\n if use_dismax:\n params.append(('q', ' '.join(q_list)))\n params.append(('defType', 'dismax'))\n params.append(('qf', 'text title^20 author_name^20'))\n params.append(('bf', 'min(100,edition_count)'))\n else:\n params.append(('q', ' '.join(q_list + ['_val_:\"sqrt(edition_count)\"^10'])))\n\n if 'public_scan' in param:\n v = param.pop('public_scan').lower()\n if v in ('true', 'false'):\n if v == 'false':\n 
# also constrain on print disabled since the index may not be in sync\n param.setdefault('print_disabled', 'false')\n params.append(('fq', 'public_scan_b:%s' % v))\n\n if 'print_disabled' in param:\n v = param.pop('print_disabled').lower()\n if v in ('true', 'false'):\n minus = '-' if v == 'false' else ''\n params.append(('fq', '%ssubject_key:protected_daisy' % minus))\n\n if 'has_fulltext' in param:\n v = param['has_fulltext'].lower()\n if v not in ('true', 'false'):\n del param['has_fulltext']\n params.append(('fq', 'has_fulltext:%s' % v))\n\n for field in FACET_FIELDS:\n if field == 'has_fulltext':\n continue\n if field == 'author_facet':\n field = 'author_key'\n if field not in param:\n continue\n values = param[field]\n params += [('fq', f'{field}:\"{val}\"') for val in values if val]\n\n if sort:\n params.append(('sort', sort))\n\n if 'wt' in param:\n params.append(('wt', param.get('wt')))\n url = f'{solr_select_url}?{urlencode(params)}'\n\n response = execute_solr_query(solr_select_url, params)\n solr_result = response.content if response else None # bytes or None\n return (solr_result, url, q_list)\n\n\ndef do_search(param, sort, page=1, rows=100, spellcheck_count=None):\n if sort:\n sort = process_sort(sort)\n (solr_result, solr_select, q_list) = run_solr_query(\n param, rows, page, sort, spellcheck_count\n )\n is_bad = False\n if not solr_result or solr_result.startswith(b'<html'):\n is_bad = True\n if not is_bad:\n try:\n root = XML(solr_result)\n except XMLSyntaxError:\n is_bad = True\n if is_bad:\n m = re_pre.search(solr_result)\n return web.storage(\n facet_counts=None,\n docs=[],\n is_advanced=bool(param.get('q')),\n num_found=None,\n solr_select=solr_select,\n q_list=q_list,\n error=(web.htmlunquote(m.group(1)) if m else solr_result),\n )\n\n spellcheck = root.find(\"lst[@name='spellcheck']\")\n spell_map = {}\n if spellcheck is not None and len(spellcheck):\n for e in spellcheck.find(\"lst[@name='suggestions']\"):\n assert e.tag == 'lst'\n a = e.attrib['name']\n if a in spell_map or a in ('sqrt', 'edition_count'):\n continue\n spell_map[a] = [i.text for i in e.find(\"arr[@name='suggestion']\")]\n\n docs = root.find('result')\n return web.storage(\n facet_counts=read_facets(root),\n docs=docs,\n is_advanced=bool(param.get('q')),\n num_found=(int(docs.attrib['numFound']) if docs is not None else None),\n solr_select=solr_select,\n q_list=q_list,\n error=None,\n spellcheck=spell_map,\n )\n\n\ndef get_doc(doc): # called from work_search template\n e_ia = doc.find(\"arr[@name='ia']\")\n e_id_project_gutenberg = doc.find(\"arr[@name='id_project_gutenberg']\") or []\n e_id_librivox = doc.find(\"arr[@name='id_librivox']\") or []\n e_id_standard_ebooks = doc.find(\"arr[@name='id_standard_ebooks']\") or []\n\n first_pub = None\n e_first_pub = doc.find(\"int[@name='first_publish_year']\")\n if e_first_pub is not None:\n first_pub = e_first_pub.text\n e_first_edition = doc.find(\"str[@name='first_edition']\")\n first_edition = None\n if e_first_edition is not None:\n first_edition = e_first_edition.text\n\n work_subtitle = None\n e_subtitle = doc.find(\"str[@name='subtitle']\")\n if e_subtitle is not None:\n work_subtitle = e_subtitle.text\n\n if doc.find(\"arr[@name='author_key']\") is None:\n assert doc.find(\"arr[@name='author_name']\") is None\n authors = []\n else:\n ak = [e.text for e in doc.find(\"arr[@name='author_key']\")]\n an = [e.text for e in doc.find(\"arr[@name='author_name']\")]\n authors = [\n web.storage(\n key=key,\n name=name,\n url=\"/authors/{}/{}\".format(\n 
key, (urlsafe(name) if name is not None else 'noname')\n ),\n )\n for key, name in zip(ak, an)\n ]\n cover = doc.find(\"str[@name='cover_edition_key']\")\n languages = doc.find(\"arr[@name='language']\")\n e_public_scan = doc.find(\"bool[@name='public_scan_b']\")\n e_lending_edition = doc.find(\"str[@name='lending_edition_s']\")\n e_lending_identifier = doc.find(\"str[@name='lending_identifier_s']\")\n e_collection = doc.find(\"str[@name='ia_collection_s']\")\n collections = set()\n if e_collection is not None:\n collections = set(e_collection.text.split(';'))\n\n doc = web.storage(\n key=doc.find(\"str[@name='key']\").text,\n title=doc.find(\"str[@name='title']\").text,\n edition_count=int(doc.find(\"int[@name='edition_count']\").text),\n ia=[e.text for e in (e_ia if e_ia is not None else [])],\n has_fulltext=(doc.find(\"bool[@name='has_fulltext']\").text == 'true'),\n public_scan=(\n (e_public_scan.text == 'true')\n if e_public_scan is not None\n else (e_ia is not None)\n ),\n lending_edition=(\n e_lending_edition.text if e_lending_edition is not None else None\n ),\n lending_identifier=(\n e_lending_identifier.text if e_lending_identifier is not None else None\n ),\n collections=collections,\n authors=authors,\n first_publish_year=first_pub,\n first_edition=first_edition,\n subtitle=work_subtitle,\n cover_edition_key=(cover.text if cover is not None else None),\n languages=languages and [lang.text for lang in languages],\n id_project_gutenberg=[e.text for e in e_id_project_gutenberg],\n id_librivox=[e.text for e in e_id_librivox],\n id_standard_ebooks=[e.text for e in e_id_standard_ebooks],\n )\n\n doc.url = doc.key + '/' + urlsafe(doc.title)\n return doc\n\n\ndef work_object(w): # called by works_by_author\n ia = w.get('ia', [])\n obj = dict(\n authors=[\n web.storage(key='/authors/' + k, name=n)\n for k, n in zip(w['author_key'], w['author_name'])\n ],\n edition_count=w['edition_count'],\n key=w['key'],\n title=w['title'],\n public_scan=w.get('public_scan_b', bool(ia)),\n lending_edition=w.get('lending_edition_s', ''),\n lending_identifier=w.get('lending_identifier_s', ''),\n collections=set(\n w['ia_collection_s'].split(';') if 'ia_collection_s' in w else []\n ),\n url=w['key'] + '/' + urlsafe(w['title']),\n cover_edition_key=w.get('cover_edition_key'),\n first_publish_year=(\n w['first_publish_year'] if 'first_publish_year' in w else None\n ),\n ia=w.get('ia', []),\n cover_i=w.get('cover_i'),\n id_project_gutenberg=w.get('id_project_gutenberg'),\n id_librivox=w.get('id_librivox'),\n id_standard_ebooks=w.get('id_standard_ebooks'),\n )\n\n for f in 'has_fulltext', 'subtitle':\n if w.get(f):\n obj[f] = w[f]\n return web.storage(obj)\n\n\nclass scan(delegate.page):\n \"\"\"\n Experimental EAN barcode scanner page to scan and add/view books by their barcodes.\n \"\"\"\n\n path = \"/barcodescanner\"\n\n def GET(self):\n return render.barcodescanner()\n\n\nclass search(delegate.page):\n def redirect_if_needed(self, i):\n params = {}\n need_redirect = False\n for k, v in i.items():\n if k in plurals:\n params[k] = None\n k = plurals[k]\n need_redirect = True\n if isinstance(v, list):\n if v == []:\n continue\n clean = [normalize('NFC', b.strip()) for b in v]\n if clean != v:\n need_redirect = True\n if len(clean) == 1 and clean[0] == '':\n clean = None\n else:\n clean = normalize('NFC', v.strip())\n if clean == '':\n need_redirect = True\n clean = None\n if clean != v:\n need_redirect = True\n params[k] = clean\n if need_redirect:\n raise web.seeother(web.changequery(**params))\n\n def 
isbn_redirect(self, isbn_param):\n isbn = normalize_isbn(isbn_param)\n if not isbn:\n return\n\n ed = Edition.from_isbn(isbn)\n if ed:\n web.seeother(ed.key)\n\n def GET(self):\n # Enable patrons to search for query q2 within collection q\n # q2 param gets removed and prepended to q via a redirect\n _i = web.input(q='', q2='')\n if _i.q.strip() and _i.q2.strip():\n _i.q = _i.q2.strip() + ' ' + _i.q.strip()\n _i.pop('q2')\n raise web.seeother('/search?' + urllib.parse.urlencode(_i))\n\n i = web.input(\n author_key=[],\n language=[],\n first_publish_year=[],\n publisher_facet=[],\n subject_facet=[],\n person_facet=[],\n place_facet=[],\n time_facet=[],\n public_scan_b=[],\n )\n\n # Send to full-text Search Inside if checkbox checked\n if i.get('search-fulltext'):\n raise web.seeother(\n '/search/inside?' + urllib.parse.urlencode({'q': i.get('q', '')})\n )\n\n if i.get('wisbn'):\n i.isbn = i.wisbn\n\n self.redirect_if_needed(i)\n\n if 'isbn' in i:\n self.isbn_redirect(i.isbn)\n\n q_list = []\n q = i.get('q', '').strip()\n if q:\n m = re_olid.match(q)\n if m:\n raise web.seeother(f'/{OLID_URLS[m.group(1)]}/{q}')\n m = re_isbn_field.match(q)\n if m:\n self.isbn_redirect(m.group(1))\n q_list.append(q)\n for k in ('title', 'author', 'isbn', 'subject', 'place', 'person', 'publisher'):\n if k in i:\n v = re_to_esc.sub(r'\\\\\\g<0>', i[k].strip())\n q_list.append(k + ':' + v)\n return render.work_search(\n i,\n ' '.join(q_list),\n do_search,\n get_doc,\n get_availability_of_ocaids,\n fulltext_search,\n FACET_FIELDS,\n )\n\n\ndef works_by_author(\n akey, sort='editions', page=1, rows=100, has_fulltext=False, query=None\n):\n # called by merge_author_works\n q = 'author_key:' + akey\n if query:\n q = query\n\n offset = rows * (page - 1)\n params = [\n ('fq', 'author_key:' + akey),\n ('fq', 'type:work'),\n ('q', q),\n ('start', offset),\n ('rows', rows),\n (\n 'fl',\n ','.join(\n [\n 'key',\n 'author_name',\n 'author_key',\n 'title',\n 'subtitle',\n 'edition_count',\n 'ia',\n 'cover_edition_key',\n 'has_fulltext',\n 'language',\n 'first_publish_year',\n 'public_scan_b',\n 'lending_edition_s',\n 'lending_identifier_s',\n 'ia_collection_s',\n 'id_project_gutenberg',\n 'id_librivox',\n 'id_standard_ebooks',\n 'cover_i',\n ]\n ),\n ),\n ('wt', 'json'),\n ('q.op', 'AND'),\n ('facet', 'true'),\n ('facet.mincount', 1),\n ('f.author_facet.facet.sort', 'count'),\n ('f.publish_year.facet.limit', -1),\n ('facet.limit', 25),\n ]\n\n if has_fulltext:\n params.append(('fq', 'has_fulltext:true'))\n\n if sort == \"editions\":\n params.append(('sort', 'edition_count desc'))\n elif sort.startswith('old'):\n params.append(('sort', 'first_publish_year asc'))\n elif sort.startswith('new'):\n params.append(('sort', 'first_publish_year desc'))\n elif sort.startswith('title'):\n params.append(('sort', 'title asc'))\n\n facet_fields = [\n \"author_facet\",\n \"language\",\n \"publish_year\",\n \"publisher_facet\",\n \"subject_facet\",\n \"person_facet\",\n \"place_facet\",\n \"time_facet\",\n ]\n for f in facet_fields:\n params.append((\"facet.field\", f))\n\n reply = parse_json_from_solr_query(solr_select_url, params)\n if reply is None:\n return web.storage(\n num_found=0,\n works=[],\n years=[],\n get_facet=[],\n sort=sort,\n )\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n facets = reply['facet_counts']['facet_fields']\n works = [work_object(w) for w in reply['response']['docs']]\n\n def get_facet(f, limit=None):\n return list(web.group(facets[f][: limit * 2] if limit else facets[f], 
2))\n\n return web.storage(\n num_found=int(reply['response']['numFound']),\n works=add_availability(works),\n years=[(int(k), v) for k, v in get_facet('publish_year')],\n get_facet=get_facet,\n sort=sort,\n )\n\n\ndef sorted_work_editions(wkey, json_data=None):\n \"\"\"Setting json_data to a real value simulates getting SOLR data back, i.e. for testing (but ick!)\"\"\"\n q = 'key:' + wkey\n if json_data:\n reply = json.loads(json_data)\n else:\n reply = parse_json_from_solr_query(\n solr_select_url,\n {\n 'q.op': 'AND',\n 'q': q,\n 'rows': 10,\n 'fl': 'edition_key',\n 'qt': 'standard',\n 'wt': 'json',\n },\n )\n if reply is None or reply.get('response', {}).get('numFound', 0) == 0:\n return []\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n return reply[\"response\"]['docs'][0].get('edition_key', [])\n\n\ndef top_books_from_author(akey, rows=5, offset=0):\n q = 'author_key:(' + akey + ')'\n json_result = parse_json_from_solr_query(\n solr_select_url,\n {\n 'q': q,\n 'start': offset,\n 'rows': rows,\n 'fl': 'key,title,edition_count,first_publish_year',\n 'sort': 'edition_count desc',\n 'wt': 'json',\n },\n )\n if json_result is None:\n return {'books': [], 'total': 0}\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n response = json_result['response']\n return {\n 'books': [web.storage(doc) for doc in response['docs']],\n 'total': response['numFound'],\n }\n\n\nclass advancedsearch(delegate.page):\n path = \"/advancedsearch\"\n\n def GET(self):\n return render_template(\"search/advancedsearch.html\")\n\n\ndef escape_colon(q, vf):\n if ':' not in q:\n return q\n parts = q.split(':')\n result = parts.pop(0)\n while parts:\n if not any(result.endswith(f) for f in vf):\n result += '\\\\'\n result += ':' + parts.pop(0)\n return result\n\n\ndef run_solr_search(solr_select: str, params: dict):\n response = execute_solr_query(solr_select, params)\n json_data = response.content if response else None # bytes or None\n return parse_search_response(json_data)\n\n\ndef parse_search_response(json_data):\n \"\"\"Construct response for any input\"\"\"\n if json_data is None:\n return {'error': 'Error parsing empty search engine response'}\n try:\n return json.loads(json_data)\n except json.JSONDecodeError:\n logger.exception(\"Error parsing search engine response\")\n m = re_pre.search(json_data)\n if m is None:\n return {'error': 'Error parsing search engine response'}\n error = web.htmlunquote(m.group(1))\n solr_error = 'org.apache.lucene.queryParser.ParseException: '\n if error.startswith(solr_error):\n error = error[len(solr_error) :]\n return {'error': error}\n\n\nclass list_search(delegate.page):\n path = '/search/lists'\n\n def GET(self):\n i = web.input(q='', offset='0', limit='10')\n\n lists = self.get_results(i.q, i.offset, i.limit)\n\n return render_template('search/lists.tmpl', q=i.q, lists=lists)\n\n def get_results(self, q, offset=0, limit=100):\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n keys = web.ctx.site.things(\n {\n \"type\": \"/type/list\",\n \"name~\": q,\n \"limit\": int(limit),\n \"offset\": int(offset),\n }\n )\n\n return web.ctx.site.get_many(keys)\n\n\nclass list_search_json(list_search):\n path = '/search/lists'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=10)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 10)\n limit = min(100, limit)\n\n docs = self.get_results(i.q, offset=offset, limit=limit)\n\n response = {'start': offset, 'docs': [doc.preview() 
for doc in docs]}\n\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\nclass subject_search(delegate.page):\n path = '/search/subjects'\n\n def GET(self):\n return render_template('search/subjects.tmpl', self.get_results)\n\n def get_results(self, q, offset=0, limit=100):\n valid_fields = ['key', 'name', 'subject_type', 'work_count']\n q = escape_colon(escape_bracket(q), valid_fields)\n\n results = run_solr_search(\n solr_select_url,\n {\n \"fq\": \"type:subject\",\n \"q.op\": \"AND\",\n \"q\": q,\n \"start\": offset,\n \"rows\": limit,\n \"fl\": \",\".join(valid_fields),\n \"qt\": \"standard\",\n \"wt\": \"json\",\n \"sort\": \"work_count desc\",\n },\n )\n response = results['response']\n\n for doc in response['docs']:\n doc['type'] = doc.get('subject_type', 'subject')\n doc['count'] = doc.get('work_count', 0)\n\n return results\n\n\nclass subject_search_json(subject_search):\n path = '/search/subjects'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=100)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 100)\n limit = min(1000, limit) # limit limit to 1000.\n\n response = self.get_results(i.q, offset=offset, limit=limit)['response']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\nclass author_search(delegate.page):\n path = '/search/authors'\n\n def GET(self):\n return render_template('search/authors.tmpl', self.get_results)\n\n def get_results(self, q, offset=0, limit=100):\n valid_fields = [\n 'key',\n 'name',\n 'alternate_names',\n 'birth_date',\n 'death_date',\n 'date',\n 'work_count',\n ]\n q = escape_colon(escape_bracket(q), valid_fields)\n q_has_fields = ':' in q.replace(r'\\:', '')\n\n d = run_solr_search(\n solr_select_url,\n {\n 'fq': 'type:author',\n 'q.op': 'AND',\n 'q': q,\n 'start': offset,\n 'rows': limit,\n 'fl': '*',\n 'qt': 'standard',\n 'sort': 'work_count desc',\n 'wt': 'json',\n **(\n {}\n if q_has_fields\n else {'defType': 'dismax', 'qf': 'name alternate_names'}\n ),\n },\n )\n\n docs = d.get('response', {}).get('docs', [])\n for doc in docs:\n # replace /authors/OL1A with OL1A\n # The template still expects the key to be in the old format\n doc['key'] = doc['key'].split(\"/\")[-1]\n return d\n\n\nclass author_search_json(author_search):\n path = '/search/authors'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=100)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 100)\n limit = min(1000, limit) # limit limit to 1000.\n\n response = self.get_results(i.q, offset=offset, limit=limit)['response']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\n@public\ndef random_author_search(limit=10):\n \"\"\"\n Returns a dict that contains a random list of authors. 
Amount of authors\n returned is set be the given limit.\n \"\"\"\n letters_and_digits = string.ascii_letters + string.digits\n seed = ''.join(random.choice(letters_and_digits) for _ in range(10))\n\n search_results = run_solr_search(\n solr_select_url,\n {\n 'q': 'type:author',\n 'rows': limit,\n 'sort': f'random_{seed} desc',\n 'wt': 'json',\n },\n )\n\n docs = search_results.get('response', {}).get('docs', [])\n\n assert docs, f\"random_author_search({limit}) returned no docs\"\n assert (\n len(docs) == limit\n ), f\"random_author_search({limit}) returned {len(docs)} docs\"\n\n for doc in docs:\n # replace /authors/OL1A with OL1A\n # The template still expects the key to be in the old format\n doc['key'] = doc['key'].split(\"/\")[-1]\n\n return search_results['response']\n\n\ndef rewrite_list_editions_query(q, page, offset, limit):\n \"\"\"Takes a solr query. If it doesn't contain a /lists/ key, then\n return the query, unchanged, exactly as it entered the\n function. If it does contain a lists key, then use the pagination\n information to fetch the right block of keys from the\n lists_editions API and then feed these editions resulting work\n keys into solr with the form key:(OL123W, OL234W). This way, we\n can use the solr API to fetch list works and render them in\n carousels in the right format.\n \"\"\"\n if '/lists/' in q:\n editions = get_list_editions(q, offset=offset, limit=limit)\n work_ids = [ed.get('works')[0]['key'] for ed in editions]\n q = 'key:(' + ' OR '.join(work_ids) + ')'\n # We've applied the offset to fetching get_list_editions to\n # produce the right set of discrete work IDs. We don't want\n # it applied to paginate our resulting solr query.\n offset = 0\n page = 1\n return q, page, offset, limit\n\n\n@public\ndef work_search(\n query,\n sort=None,\n page=1,\n offset=0,\n limit=100,\n fields='*',\n facet=True,\n spellcheck_count=None,\n):\n \"\"\"\n params:\n query: dict\n sort: str editions|old|new|scans\n \"\"\"\n # Ensure we don't mutate the `query` passed in by reference\n query = copy.deepcopy(query)\n query['wt'] = 'json'\n if sort:\n sort = process_sort(sort)\n\n # deal with special /lists/ key queries\n query['q'], page, offset, limit = rewrite_list_editions_query(\n query['q'], page, offset, limit\n )\n try:\n (reply, solr_select, q_list) = run_solr_query(\n query,\n rows=limit,\n page=page,\n sort=sort,\n offset=offset,\n fields=fields,\n facet=facet,\n spellcheck_count=spellcheck_count,\n )\n response = json.loads(reply)['response'] or ''\n except (ValueError, OSError) as e:\n logger.error(\"Error in processing search API.\")\n response = dict(start=0, numFound=0, docs=[], error=str(e))\n\n # backward compatibility\n response['num_found'] = response['numFound']\n if fields == '*' or 'availability' in fields:\n response['docs'] = add_availability(response['docs'])\n return response\n\n\nclass search_json(delegate.page):\n path = \"/search\"\n encoding = \"json\"\n\n def GET(self):\n i = web.input(\n author_key=[],\n subject_facet=[],\n person_facet=[],\n place_facet=[],\n time_facet=[],\n first_publish_year=[],\n publisher_facet=[],\n language=[],\n public_scan_b=[],\n )\n if 'query' in i:\n query = json.loads(i.query)\n else:\n query = i\n\n sort = query.get('sort', None)\n\n limit = safeint(query.pop(\"limit\", \"100\"), default=100)\n if \"offset\" in query:\n offset = safeint(query.pop(\"offset\", 0), default=0)\n page = None\n else:\n offset = None\n page = safeint(query.pop(\"page\", \"1\"), default=1)\n\n fields = query.pop('fields', 
'*').split(',')\n facet = query.pop('_facet', 'true').lower() in ['true']\n spellcheck_count = safeint(\n query.pop(\"_spellcheck_count\", default_spellcheck_count),\n default=default_spellcheck_count,\n )\n\n # If the query is a /list/ key, create custom list_editions_query\n q = query.get('q', '')\n query['q'], page, offset, limit = rewrite_list_editions_query(\n q, page, offset, limit\n )\n response = work_search(\n query,\n sort=sort,\n page=page,\n offset=offset,\n limit=limit,\n fields=fields,\n facet=facet,\n spellcheck_count=spellcheck_count,\n )\n response['q'] = q\n response['offset'] = offset\n response['docs'] = response['docs']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response, indent=4))\n\n\ndef setup():\n from openlibrary.plugins.worksearch import subjects\n\n # subjects module needs read_author_facet and solr_select_url.\n # Importing this module to access them will result in circular import.\n # Setting them like this to avoid circular-import.\n subjects.read_author_facet = read_author_facet\n if hasattr(config, 'plugin_worksearch'):\n subjects.solr_select_url = solr_select_url\n\n subjects.setup()\n\n from openlibrary.plugins.worksearch import languages, publishers\n\n publishers.setup()\n languages.setup()\n\n\nsetup()\n", "path": "openlibrary/plugins/worksearch/code.py" } ]
[ { "content": "from datetime import datetime\nimport copy\nimport json\nimport logging\nimport random\nimport re\nimport string\nfrom typing import List, Tuple, Any, Union, Optional, Iterable, Dict\nfrom unicodedata import normalize\nfrom json import JSONDecodeError\nimport requests\nimport web\nfrom lxml.etree import XML, XMLSyntaxError\nfrom requests import Response\nfrom six.moves import urllib\n\nfrom infogami import config\nfrom infogami.utils import delegate, stats\nfrom infogami.utils.view import public, render, render_template, safeint\nfrom openlibrary.core.lending import add_availability, get_availability_of_ocaids\nfrom openlibrary.core.models import Edition # noqa: E402\nfrom openlibrary.plugins.inside.code import fulltext_search\nfrom openlibrary.plugins.openlibrary.lists import get_list_editions\nfrom openlibrary.plugins.openlibrary.processors import urlsafe\nfrom openlibrary.plugins.upstream.utils import urlencode\nfrom openlibrary.utils import escape_bracket\nfrom openlibrary.utils.ddc import (\n normalize_ddc,\n normalize_ddc_prefix,\n normalize_ddc_range,\n)\nfrom openlibrary.utils.isbn import normalize_isbn\nfrom openlibrary.utils.lcc import (\n normalize_lcc_prefix,\n normalize_lcc_range,\n short_lcc_to_sortable_lcc,\n)\n\nlogger = logging.getLogger(\"openlibrary.worksearch\")\n\nif hasattr(config, 'plugin_worksearch'):\n solr_select_url = (\n config.plugin_worksearch.get('solr_base_url', 'localhost') + '/select'\n )\n\n default_spellcheck_count = config.plugin_worksearch.get('spellcheck_count', 10)\n\n\nALL_FIELDS = [\n \"key\",\n \"redirects\",\n \"title\",\n \"subtitle\",\n \"alternative_title\",\n \"alternative_subtitle\",\n \"edition_key\",\n \"by_statement\",\n \"publish_date\",\n \"lccn\",\n \"ia\",\n \"oclc\",\n \"isbn\",\n \"contributor\",\n \"publish_place\",\n \"publisher\",\n \"first_sentence\",\n \"author_key\",\n \"author_name\",\n \"author_alternative_name\",\n \"subject\",\n \"person\",\n \"place\",\n \"time\",\n \"has_fulltext\",\n \"title_suggest\",\n \"edition_count\",\n \"publish_year\",\n \"language\",\n \"number_of_pages\",\n \"ia_count\",\n \"publisher_facet\",\n \"author_facet\",\n \"first_publish_year\",\n # Subjects\n \"subject_key\",\n \"person_key\",\n \"place_key\",\n \"time_key\",\n # Classifications\n \"lcc\",\n \"ddc\",\n \"lcc_sort\",\n \"ddc_sort\",\n]\nFACET_FIELDS = [\n \"has_fulltext\",\n \"author_facet\",\n \"language\",\n \"first_publish_year\",\n \"publisher_facet\",\n \"subject_facet\",\n \"person_facet\",\n \"place_facet\",\n \"time_facet\",\n \"public_scan_b\",\n]\nFIELD_NAME_MAP = {\n 'author': 'author_name',\n 'authors': 'author_name',\n 'by': 'author_name',\n 'publishers': 'publisher',\n # \"Private\" fields\n # This is private because we'll change it to a multi-valued field instead of a\n # plain string at the next opportunity, which will make it much more usable.\n '_ia_collection': 'ia_collection_s',\n}\nSORTS = {\n 'editions': 'edition_count desc',\n 'old': 'def(first_publish_year, 9999) asc',\n 'new': 'first_publish_year desc',\n 'scans': 'ia_count desc',\n # Classifications\n 'lcc_sort': 'lcc_sort asc',\n 'lcc_sort asc': 'lcc_sort asc',\n 'lcc_sort desc': 'lcc_sort desc',\n 'ddc_sort': 'ddc_sort asc',\n 'ddc_sort asc': 'ddc_sort asc',\n 'ddc_sort desc': 'ddc_sort desc',\n # Random\n 'random': 'random_1 asc',\n 'random asc': 'random_1 asc',\n 'random desc': 'random_1 desc',\n 'random.hourly': lambda: f'random_{datetime.now():%Y%m%dT%H} asc',\n 'random.daily': lambda: f'random_{datetime.now():%Y%m%d} 
asc',\n}\nDEFAULT_SEARCH_FIELDS = {\n 'key',\n 'author_name',\n 'author_key',\n 'title',\n 'subtitle',\n 'edition_count',\n 'ia',\n 'has_fulltext',\n 'first_publish_year',\n 'cover_i',\n 'cover_edition_key',\n 'public_scan_b',\n 'lending_edition_s',\n 'lending_identifier_s',\n 'language',\n 'ia_collection_s',\n # FIXME: These should be fetched from book_providers, but can't cause circular dep\n 'id_project_gutenberg',\n 'id_librivox',\n 'id_standard_ebooks',\n}\nOLID_URLS = {'A': 'authors', 'M': 'books', 'W': 'works'}\n\nre_to_esc = re.compile(r'[\\[\\]:/]')\nre_isbn_field = re.compile(r'^\\s*(?:isbn[:\\s]*)?([-0-9X]{9,})\\s*$', re.I)\nre_author_key = re.compile(r'(OL\\d+A)')\nre_fields = re.compile(r'(-?%s):' % '|'.join(ALL_FIELDS + list(FIELD_NAME_MAP)), re.I)\nre_op = re.compile(' +(OR|AND)$')\nre_range = re.compile(r'\\[(?P<start>.*) TO (?P<end>.*)\\]')\nre_author_facet = re.compile(r'^(OL\\d+A) (.*)$')\nre_pre = re.compile(r'<pre>(.*)</pre>', re.S)\nre_subject_types = re.compile('^(places|times|people)/(.*)')\nre_olid = re.compile(r'^OL\\d+([AMW])$')\n\nplurals = {f + 's': f for f in ('publisher', 'author')}\n\n\n@public\ndef get_solr_works(work_key: Iterable[str]) -> dict[str, dict]:\n from openlibrary.plugins.worksearch.search import get_solr\n\n return {\n doc['key']: doc\n for doc in get_solr().get_many(set(work_key), fields=DEFAULT_SEARCH_FIELDS)\n }\n\n\ndef process_sort(raw_sort):\n \"\"\"\n :param str raw_sort:\n :rtype: str\n\n >>> process_sort('editions')\n 'edition_count desc'\n >>> process_sort('editions, new')\n 'edition_count desc,first_publish_year desc'\n >>> process_sort('random')\n 'random_1 asc'\n >>> process_sort('random_custom_seed')\n 'random_custom_seed asc'\n >>> process_sort('random_custom_seed desc')\n 'random_custom_seed desc'\n >>> process_sort('random_custom_seed asc')\n 'random_custom_seed asc'\n \"\"\"\n\n def process_individual_sort(sort):\n if sort.startswith('random_'):\n return sort if ' ' in sort else sort + ' asc'\n else:\n solr_sort = SORTS[sort]\n return solr_sort() if callable(solr_sort) else solr_sort\n\n return ','.join(process_individual_sort(s.strip()) for s in raw_sort.split(','))\n\n\ndef read_author_facet(af):\n # example input: \"OL26783A Leo Tolstoy\"\n return re_author_facet.match(af).groups()\n\n\ndef get_language_name(code):\n lang = web.ctx.site.get('/languages/' + code)\n return lang.name if lang else \"'%s' unknown\" % code\n\n\ndef read_facets(root):\n e_facet_counts = root.find(\"lst[@name='facet_counts']\")\n e_facet_fields = e_facet_counts.find(\"lst[@name='facet_fields']\")\n facets = {}\n for e_lst in e_facet_fields:\n assert e_lst.tag == 'lst'\n name = e_lst.attrib['name']\n if name == 'author_facet':\n name = 'author_key'\n if name == 'has_fulltext': # boolean facets\n e_true = e_lst.find(\"int[@name='true']\")\n true_count = e_true.text if e_true is not None else 0\n e_false = e_lst.find(\"int[@name='false']\")\n false_count = e_false.text if e_false is not None else 0\n facets[name] = [\n ('true', 'yes', true_count),\n ('false', 'no', false_count),\n ]\n continue\n facets[name] = []\n for e in e_lst:\n if e.text == '0':\n continue\n k = e.attrib['name']\n if name == 'author_key':\n k, display = read_author_facet(k)\n elif name == 'language':\n display = get_language_name(k)\n else:\n display = k\n facets[name].append((k, display, e.text))\n return facets\n\n\ndef lcc_transform(raw):\n \"\"\"\n Transform the lcc search field value\n :param str raw:\n :rtype: str\n \"\"\"\n # e.g. 
lcc:[NC1 TO NC1000] to lcc:[NC-0001.00000000 TO NC-1000.00000000]\n # for proper range search\n m = re_range.match(raw)\n if m:\n lcc_range = [m.group('start').strip(), m.group('end').strip()]\n normed = normalize_lcc_range(*lcc_range)\n return f'[{normed[0] or lcc_range[0]} TO {normed[1] or lcc_range[1]}]'\n elif '*' in raw and not raw.startswith('*'):\n # Marshals human repr into solr repr\n # lcc:A720* should become A--0720*\n parts = raw.split('*', 1)\n lcc_prefix = normalize_lcc_prefix(parts[0])\n return (lcc_prefix or parts[0]) + '*' + parts[1]\n else:\n normed = short_lcc_to_sortable_lcc(raw.strip('\"'))\n if normed:\n use_quotes = ' ' in normed or raw.startswith('\"')\n return ('\"%s\"' if use_quotes else '%s*') % normed\n\n # If none of the transforms took\n return raw\n\n\ndef ddc_transform(raw):\n \"\"\"\n Transform the ddc search field value\n :param str raw:\n :rtype: str\n \"\"\"\n m = re_range.match(raw)\n if m:\n raw = [m.group('start').strip(), m.group('end').strip()]\n normed = normalize_ddc_range(*raw)\n return f'[{normed[0] or raw[0]} TO {normed[1] or raw[1]}]'\n elif raw.endswith('*'):\n return normalize_ddc_prefix(raw[:-1]) + '*'\n else:\n normed = normalize_ddc(raw.strip('\"'))\n if normed:\n return normed[0]\n\n # if none of the transforms took\n return raw\n\n\ndef ia_collection_s_transform(raw):\n \"\"\"\n Because this field is not a multi-valued field in solr, but a simple ;-separate\n string, we have to do searches like this for now.\n \"\"\"\n result = raw\n if not result.startswith('*'):\n result = '*' + result\n if not result.endswith('*'):\n result += '*'\n return result\n\n\ndef parse_query_fields(q):\n found = [(m.start(), m.end()) for m in re_fields.finditer(q)]\n first = q[: found[0][0]].strip() if found else q.strip()\n if first:\n yield {'field': 'text', 'value': first.replace(':', r'\\:')}\n for field_num in range(len(found)):\n op_found = None\n f = found[field_num]\n field_name = q[f[0] : f[1] - 1].lower()\n if field_name in FIELD_NAME_MAP:\n field_name = FIELD_NAME_MAP[field_name]\n if field_num == len(found) - 1:\n v = q[f[1] :].strip()\n else:\n v = q[f[1] : found[field_num + 1][0]].strip()\n m = re_op.search(v)\n if m:\n v = v[: -len(m.group(0))]\n op_found = m.group(1)\n if field_name == 'isbn':\n isbn = normalize_isbn(v)\n if isbn:\n v = isbn\n if field_name in ('lcc', 'lcc_sort'):\n v = lcc_transform(v)\n if field_name == ('ddc', 'ddc_sort'):\n v = ddc_transform(v)\n if field_name == 'ia_collection_s':\n v = ia_collection_s_transform(v)\n\n yield {'field': field_name, 'value': v.replace(':', r'\\:')}\n if op_found:\n yield {'op': op_found}\n\n\ndef build_q_list(param):\n q_list = []\n if 'q' in param:\n # Solr 4+ has support for regexes (eg `key:/foo.*/`)! But for now, let's not\n # expose that and escape all '/'. 
Otherwise `key:/works/OL1W` is interpreted as\n # a regex.\n q_param = param['q'].strip().replace('/', '\\\\/')\n else:\n q_param = None\n use_dismax = False\n if q_param:\n if q_param == '*:*':\n q_list.append(q_param)\n elif 'NOT ' in q_param: # this is a hack\n q_list.append(q_param.strip())\n elif re_fields.search(q_param):\n q_list.extend(\n i['op'] if 'op' in i else '{}:({})'.format(i['field'], i['value'])\n for i in parse_query_fields(q_param)\n )\n else:\n isbn = normalize_isbn(q_param)\n if isbn and len(isbn) in (10, 13):\n q_list.append('isbn:(%s)' % isbn)\n else:\n q_list.append(q_param.strip().replace(':', r'\\:'))\n use_dismax = True\n else:\n if 'author' in param:\n v = param['author'].strip()\n m = re_author_key.search(v)\n if m:\n q_list.append(\"author_key:(%s)\" % m.group(1))\n else:\n v = re_to_esc.sub(r'\\\\\\g<0>', v)\n # Somehow v can be empty at this point,\n # passing the following with empty strings causes a severe error in SOLR\n if v:\n q_list.append(\n \"(author_name:({name}) OR author_alternative_name:({name}))\".format(\n name=v\n )\n )\n\n check_params = [\n 'title',\n 'publisher',\n 'oclc',\n 'lccn',\n 'contributor',\n 'subject',\n 'place',\n 'person',\n 'time',\n ]\n q_list += [\n '{}:({})'.format(k, re_to_esc.sub(r'\\\\\\g<0>', param[k]))\n for k in check_params\n if k in param\n ]\n if param.get('isbn'):\n q_list.append(\n 'isbn:(%s)' % (normalize_isbn(param['isbn']) or param['isbn'])\n )\n return (q_list, use_dismax)\n\n\ndef execute_solr_query(\n solr_path: str, params: Union[dict, list[tuple[str, Any]]]\n) -> Optional[Response]:\n stats.begin(\"solr\", url=f'{solr_path}?{urlencode(params)}')\n try:\n response = requests.get(solr_path, params=params, timeout=10)\n response.raise_for_status()\n except requests.HTTPError:\n logger.exception(\"Failed solr query\")\n return None\n finally:\n stats.end()\n return response\n\n\ndef parse_json_from_solr_query(\n solr_path: str, params: Union[dict, list[tuple[str, Any]]]\n) -> Optional[dict]:\n \"\"\"\n Returns a json.loaded Python object or None\n \"\"\"\n response = execute_solr_query(solr_path, params)\n if not response:\n logger.error(\"Error parsing empty search engine response\")\n return None\n try:\n return response.json()\n except JSONDecodeError:\n logger.exception(\"Error parsing search engine response\")\n return None\n\n\ndef run_solr_query(\n param=None,\n rows=100,\n page=1,\n sort=None,\n spellcheck_count=None,\n offset=None,\n fields=None,\n facet=True,\n):\n param = param or {}\n\n # use page when offset is not specified\n if offset is None:\n offset = rows * (page - 1)\n\n (q_list, use_dismax) = build_q_list(param)\n params = [\n ('fl', ','.join(fields or DEFAULT_SEARCH_FIELDS)),\n ('fq', 'type:work'),\n ('q.op', 'AND'),\n ('start', offset),\n ('rows', rows),\n ]\n\n if spellcheck_count is None:\n spellcheck_count = default_spellcheck_count\n\n if spellcheck_count:\n params.append(('spellcheck', 'true'))\n params.append(('spellcheck.count', spellcheck_count))\n\n if facet:\n params.append(('facet', 'true'))\n for facet in FACET_FIELDS:\n params.append(('facet.field', facet))\n\n if q_list:\n if use_dismax:\n params.append(('q', ' '.join(q_list)))\n params.append(('defType', 'dismax'))\n params.append(('qf', 'text title^20 author_name^20'))\n params.append(('bf', 'min(100,edition_count)'))\n else:\n params.append(('q', ' '.join(q_list + ['_val_:\"sqrt(edition_count)\"^10'])))\n\n if 'public_scan' in param:\n v = param.pop('public_scan').lower()\n if v in ('true', 'false'):\n if v == 'false':\n 
# also constrain on print disabled since the index may not be in sync\n param.setdefault('print_disabled', 'false')\n params.append(('fq', 'public_scan_b:%s' % v))\n\n if 'print_disabled' in param:\n v = param.pop('print_disabled').lower()\n if v in ('true', 'false'):\n minus = '-' if v == 'false' else ''\n params.append(('fq', '%ssubject_key:protected_daisy' % minus))\n\n if 'has_fulltext' in param:\n v = param['has_fulltext'].lower()\n if v not in ('true', 'false'):\n del param['has_fulltext']\n params.append(('fq', 'has_fulltext:%s' % v))\n\n for field in FACET_FIELDS:\n if field == 'has_fulltext':\n continue\n if field == 'author_facet':\n field = 'author_key'\n if field not in param:\n continue\n values = param[field]\n params += [('fq', f'{field}:\"{val}\"') for val in values if val]\n\n if sort:\n params.append(('sort', sort))\n\n if 'wt' in param:\n params.append(('wt', param.get('wt')))\n url = f'{solr_select_url}?{urlencode(params)}'\n\n response = execute_solr_query(solr_select_url, params)\n solr_result = response.content if response else None # bytes or None\n return (solr_result, url, q_list)\n\n\ndef do_search(param, sort, page=1, rows=100, spellcheck_count=None):\n if sort:\n sort = process_sort(sort)\n (solr_result, solr_select, q_list) = run_solr_query(\n param, rows, page, sort, spellcheck_count\n )\n is_bad = False\n if not solr_result or solr_result.startswith(b'<html'):\n is_bad = True\n if not is_bad:\n try:\n root = XML(solr_result)\n except XMLSyntaxError:\n is_bad = True\n if is_bad:\n m = re_pre.search(solr_result)\n return web.storage(\n facet_counts=None,\n docs=[],\n is_advanced=bool(param.get('q')),\n num_found=None,\n solr_select=solr_select,\n q_list=q_list,\n error=(web.htmlunquote(m.group(1)) if m else solr_result),\n )\n\n spellcheck = root.find(\"lst[@name='spellcheck']\")\n spell_map = {}\n if spellcheck is not None and len(spellcheck):\n for e in spellcheck.find(\"lst[@name='suggestions']\"):\n assert e.tag == 'lst'\n a = e.attrib['name']\n if a in spell_map or a in ('sqrt', 'edition_count'):\n continue\n spell_map[a] = [i.text for i in e.find(\"arr[@name='suggestion']\")]\n\n docs = root.find('result')\n return web.storage(\n facet_counts=read_facets(root),\n docs=docs,\n is_advanced=bool(param.get('q')),\n num_found=(int(docs.attrib['numFound']) if docs is not None else None),\n solr_select=solr_select,\n q_list=q_list,\n error=None,\n spellcheck=spell_map,\n )\n\n\ndef get_doc(doc): # called from work_search template\n e_ia = doc.find(\"arr[@name='ia']\")\n e_id_project_gutenberg = doc.find(\"arr[@name='id_project_gutenberg']\") or []\n e_id_librivox = doc.find(\"arr[@name='id_librivox']\") or []\n e_id_standard_ebooks = doc.find(\"arr[@name='id_standard_ebooks']\") or []\n\n first_pub = None\n e_first_pub = doc.find(\"int[@name='first_publish_year']\")\n if e_first_pub is not None:\n first_pub = e_first_pub.text\n e_first_edition = doc.find(\"str[@name='first_edition']\")\n first_edition = None\n if e_first_edition is not None:\n first_edition = e_first_edition.text\n\n work_subtitle = None\n e_subtitle = doc.find(\"str[@name='subtitle']\")\n if e_subtitle is not None:\n work_subtitle = e_subtitle.text\n\n if doc.find(\"arr[@name='author_key']\") is None:\n assert doc.find(\"arr[@name='author_name']\") is None\n authors = []\n else:\n ak = [e.text for e in doc.find(\"arr[@name='author_key']\")]\n an = [e.text for e in doc.find(\"arr[@name='author_name']\")]\n authors = [\n web.storage(\n key=key,\n name=name,\n url=\"/authors/{}/{}\".format(\n 
key, (urlsafe(name) if name is not None else 'noname')\n ),\n )\n for key, name in zip(ak, an)\n ]\n cover = doc.find(\"str[@name='cover_edition_key']\")\n languages = doc.find(\"arr[@name='language']\")\n e_public_scan = doc.find(\"bool[@name='public_scan_b']\")\n e_lending_edition = doc.find(\"str[@name='lending_edition_s']\")\n e_lending_identifier = doc.find(\"str[@name='lending_identifier_s']\")\n e_collection = doc.find(\"str[@name='ia_collection_s']\")\n collections = set()\n if e_collection is not None:\n collections = set(e_collection.text.split(';'))\n\n doc = web.storage(\n key=doc.find(\"str[@name='key']\").text,\n title=doc.find(\"str[@name='title']\").text,\n edition_count=int(doc.find(\"int[@name='edition_count']\").text),\n ia=[e.text for e in (e_ia if e_ia is not None else [])],\n has_fulltext=(doc.find(\"bool[@name='has_fulltext']\").text == 'true'),\n public_scan=(\n (e_public_scan.text == 'true')\n if e_public_scan is not None\n else (e_ia is not None)\n ),\n lending_edition=(\n e_lending_edition.text if e_lending_edition is not None else None\n ),\n lending_identifier=(\n e_lending_identifier.text if e_lending_identifier is not None else None\n ),\n collections=collections,\n authors=authors,\n first_publish_year=first_pub,\n first_edition=first_edition,\n subtitle=work_subtitle,\n cover_edition_key=(cover.text if cover is not None else None),\n languages=languages and [lang.text for lang in languages],\n id_project_gutenberg=[e.text for e in e_id_project_gutenberg],\n id_librivox=[e.text for e in e_id_librivox],\n id_standard_ebooks=[e.text for e in e_id_standard_ebooks],\n )\n\n doc.url = doc.key + '/' + urlsafe(doc.title)\n return doc\n\n\ndef work_object(w): # called by works_by_author\n ia = w.get('ia', [])\n obj = dict(\n authors=[\n web.storage(key='/authors/' + k, name=n)\n for k, n in zip(w['author_key'], w['author_name'])\n ],\n edition_count=w['edition_count'],\n key=w['key'],\n title=w['title'],\n public_scan=w.get('public_scan_b', bool(ia)),\n lending_edition=w.get('lending_edition_s', ''),\n lending_identifier=w.get('lending_identifier_s', ''),\n collections=set(\n w['ia_collection_s'].split(';') if 'ia_collection_s' in w else []\n ),\n url=w['key'] + '/' + urlsafe(w['title']),\n cover_edition_key=w.get('cover_edition_key'),\n first_publish_year=(\n w['first_publish_year'] if 'first_publish_year' in w else None\n ),\n ia=w.get('ia', []),\n cover_i=w.get('cover_i'),\n id_project_gutenberg=w.get('id_project_gutenberg'),\n id_librivox=w.get('id_librivox'),\n id_standard_ebooks=w.get('id_standard_ebooks'),\n )\n\n for f in 'has_fulltext', 'subtitle':\n if w.get(f):\n obj[f] = w[f]\n return web.storage(obj)\n\n\nclass scan(delegate.page):\n \"\"\"\n Experimental EAN barcode scanner page to scan and add/view books by their barcodes.\n \"\"\"\n\n path = \"/barcodescanner\"\n\n def GET(self):\n return render.barcodescanner()\n\n\nclass search(delegate.page):\n def redirect_if_needed(self, i):\n params = {}\n need_redirect = False\n for k, v in i.items():\n if k in plurals:\n params[k] = None\n k = plurals[k]\n need_redirect = True\n if isinstance(v, list):\n if v == []:\n continue\n clean = [normalize('NFC', b.strip()) for b in v]\n if clean != v:\n need_redirect = True\n if len(clean) == 1 and clean[0] == '':\n clean = None\n else:\n clean = normalize('NFC', v.strip())\n if clean == '':\n need_redirect = True\n clean = None\n if clean != v:\n need_redirect = True\n params[k] = clean\n if need_redirect:\n raise web.seeother(web.changequery(**params))\n\n def 
isbn_redirect(self, isbn_param):\n isbn = normalize_isbn(isbn_param)\n if not isbn:\n return\n\n ed = Edition.from_isbn(isbn)\n if ed:\n web.seeother(ed.key)\n\n def GET(self):\n # Enable patrons to search for query q2 within collection q\n # q2 param gets removed and prepended to q via a redirect\n _i = web.input(q='', q2='')\n if _i.q.strip() and _i.q2.strip():\n _i.q = _i.q2.strip() + ' ' + _i.q.strip()\n _i.pop('q2')\n raise web.seeother('/search?' + urllib.parse.urlencode(_i))\n\n i = web.input(\n author_key=[],\n language=[],\n first_publish_year=[],\n publisher_facet=[],\n subject_facet=[],\n person_facet=[],\n place_facet=[],\n time_facet=[],\n public_scan_b=[],\n )\n\n # Send to full-text Search Inside if checkbox checked\n if i.get('search-fulltext'):\n raise web.seeother(\n '/search/inside?' + urllib.parse.urlencode({'q': i.get('q', '')})\n )\n\n if i.get('wisbn'):\n i.isbn = i.wisbn\n\n self.redirect_if_needed(i)\n\n if 'isbn' in i:\n self.isbn_redirect(i.isbn)\n\n q_list = []\n q = i.get('q', '').strip()\n if q:\n m = re_olid.match(q)\n if m:\n raise web.seeother(f'/{OLID_URLS[m.group(1)]}/{q}')\n m = re_isbn_field.match(q)\n if m:\n self.isbn_redirect(m.group(1))\n q_list.append(q)\n for k in ('title', 'author', 'isbn', 'subject', 'place', 'person', 'publisher'):\n if k in i:\n v = re_to_esc.sub(r'\\\\\\g<0>', i[k].strip())\n q_list.append(k + ':' + v)\n return render.work_search(\n i,\n ' '.join(q_list),\n do_search,\n get_doc,\n get_availability_of_ocaids,\n fulltext_search,\n FACET_FIELDS,\n )\n\n\ndef works_by_author(\n akey, sort='editions', page=1, rows=100, has_fulltext=False, query=None\n):\n # called by merge_author_works\n q = 'author_key:' + akey\n if query:\n q = query\n\n offset = rows * (page - 1)\n params = [\n ('fq', 'author_key:' + akey),\n ('fq', 'type:work'),\n ('q', q),\n ('start', offset),\n ('rows', rows),\n (\n 'fl',\n ','.join(\n [\n 'key',\n 'author_name',\n 'author_key',\n 'title',\n 'subtitle',\n 'edition_count',\n 'ia',\n 'cover_edition_key',\n 'has_fulltext',\n 'language',\n 'first_publish_year',\n 'public_scan_b',\n 'lending_edition_s',\n 'lending_identifier_s',\n 'ia_collection_s',\n 'id_project_gutenberg',\n 'id_librivox',\n 'id_standard_ebooks',\n 'cover_i',\n ]\n ),\n ),\n ('wt', 'json'),\n ('q.op', 'AND'),\n ('facet', 'true'),\n ('facet.mincount', 1),\n ('f.author_facet.facet.sort', 'count'),\n ('f.publish_year.facet.limit', -1),\n ('facet.limit', 25),\n ]\n\n if has_fulltext:\n params.append(('fq', 'has_fulltext:true'))\n\n if sort == \"editions\":\n params.append(('sort', 'edition_count desc'))\n elif sort.startswith('old'):\n params.append(('sort', 'first_publish_year asc'))\n elif sort.startswith('new'):\n params.append(('sort', 'first_publish_year desc'))\n elif sort.startswith('title'):\n params.append(('sort', 'title asc'))\n\n facet_fields = [\n \"author_facet\",\n \"language\",\n \"publish_year\",\n \"publisher_facet\",\n \"subject_facet\",\n \"person_facet\",\n \"place_facet\",\n \"time_facet\",\n ]\n for f in facet_fields:\n params.append((\"facet.field\", f))\n\n reply = parse_json_from_solr_query(solr_select_url, params)\n if reply is None:\n return web.storage(\n num_found=0,\n works=[],\n years=[],\n get_facet=[],\n sort=sort,\n )\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n facets = reply['facet_counts']['facet_fields']\n works = [work_object(w) for w in reply['response']['docs']]\n\n def get_facet(f, limit=None):\n return list(web.group(facets[f][: limit * 2] if limit else facets[f], 
2))\n\n return web.storage(\n num_found=int(reply['response']['numFound']),\n works=add_availability(works),\n years=[(int(k), v) for k, v in get_facet('publish_year')],\n get_facet=get_facet,\n sort=sort,\n )\n\n\ndef sorted_work_editions(wkey, json_data=None):\n \"\"\"Setting json_data to a real value simulates getting SOLR data back, i.e. for testing (but ick!)\"\"\"\n q = 'key:' + wkey\n if json_data:\n reply = json.loads(json_data)\n else:\n reply = parse_json_from_solr_query(\n solr_select_url,\n {\n 'q.op': 'AND',\n 'q': q,\n 'rows': 10,\n 'fl': 'edition_key',\n 'qt': 'standard',\n 'wt': 'json',\n },\n )\n if reply is None or reply.get('response', {}).get('numFound', 0) == 0:\n return []\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n return reply[\"response\"]['docs'][0].get('edition_key', [])\n\n\ndef top_books_from_author(akey, rows=5, offset=0):\n q = 'author_key:(' + akey + ')'\n json_result = parse_json_from_solr_query(\n solr_select_url,\n {\n 'q': q,\n 'start': offset,\n 'rows': rows,\n 'fl': 'key,title,edition_count,first_publish_year',\n 'sort': 'edition_count desc',\n 'wt': 'json',\n },\n )\n if json_result is None:\n return {'books': [], 'total': 0}\n # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect\n response = json_result['response']\n return {\n 'books': [web.storage(doc) for doc in response['docs']],\n 'total': response['numFound'],\n }\n\n\nclass advancedsearch(delegate.page):\n path = \"/advancedsearch\"\n\n def GET(self):\n return render_template(\"search/advancedsearch.html\")\n\n\ndef escape_colon(q, vf):\n if ':' not in q:\n return q\n parts = q.split(':')\n result = parts.pop(0)\n while parts:\n if not any(result.endswith(f) for f in vf):\n result += '\\\\'\n result += ':' + parts.pop(0)\n return result\n\n\ndef run_solr_search(solr_select: str, params: dict):\n response = execute_solr_query(solr_select, params)\n json_data = response.content if response else None # bytes or None\n return parse_search_response(json_data)\n\n\ndef parse_search_response(json_data):\n \"\"\"Construct response for any input\"\"\"\n if json_data is None:\n return {'error': 'Error parsing empty search engine response'}\n try:\n return json.loads(json_data)\n except json.JSONDecodeError:\n logger.exception(\"Error parsing search engine response\")\n m = re_pre.search(json_data)\n if m is None:\n return {'error': 'Error parsing search engine response'}\n error = web.htmlunquote(m.group(1))\n solr_error = 'org.apache.lucene.queryParser.ParseException: '\n if error.startswith(solr_error):\n error = error[len(solr_error) :]\n return {'error': error}\n\n\nclass list_search(delegate.page):\n path = '/search/lists'\n\n def GET(self):\n i = web.input(q='', offset='0', limit='10')\n\n lists = self.get_results(i.q, i.offset, i.limit)\n\n return render_template('search/lists.tmpl', q=i.q, lists=lists)\n\n def get_results(self, q, offset=0, limit=100):\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n keys = web.ctx.site.things(\n {\n \"type\": \"/type/list\",\n \"name~\": q,\n \"limit\": int(limit),\n \"offset\": int(offset),\n }\n )\n\n return web.ctx.site.get_many(keys)\n\n\nclass list_search_json(list_search):\n path = '/search/lists'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=10)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 10)\n limit = min(100, limit)\n\n docs = self.get_results(i.q, offset=offset, limit=limit)\n\n response = {'start': offset, 'docs': [doc.preview() 
for doc in docs]}\n\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\nclass subject_search(delegate.page):\n path = '/search/subjects'\n\n def GET(self):\n return render_template('search/subjects.tmpl', self.get_results)\n\n def get_results(self, q, offset=0, limit=100):\n valid_fields = ['key', 'name', 'subject_type', 'work_count']\n q = escape_colon(escape_bracket(q), valid_fields)\n\n results = run_solr_search(\n solr_select_url,\n {\n \"fq\": \"type:subject\",\n \"q.op\": \"AND\",\n \"q\": q,\n \"start\": offset,\n \"rows\": limit,\n \"fl\": \",\".join(valid_fields),\n \"qt\": \"standard\",\n \"wt\": \"json\",\n \"sort\": \"work_count desc\",\n },\n )\n response = results['response']\n\n for doc in response['docs']:\n doc['type'] = doc.get('subject_type', 'subject')\n doc['count'] = doc.get('work_count', 0)\n\n return results\n\n\nclass subject_search_json(subject_search):\n path = '/search/subjects'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=100)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 100)\n limit = min(1000, limit) # limit limit to 1000.\n\n response = self.get_results(i.q, offset=offset, limit=limit)['response']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\nclass author_search(delegate.page):\n path = '/search/authors'\n\n def GET(self):\n return render_template('search/authors.tmpl', self.get_results)\n\n def get_results(self, q, offset=0, limit=100):\n valid_fields = [\n 'key',\n 'name',\n 'alternate_names',\n 'birth_date',\n 'death_date',\n 'date',\n 'work_count',\n ]\n q = escape_colon(escape_bracket(q), valid_fields)\n q_has_fields = ':' in q.replace(r'\\:', '')\n\n d = run_solr_search(\n solr_select_url,\n {\n 'fq': 'type:author',\n 'q.op': 'AND',\n 'q': q,\n 'start': offset,\n 'rows': limit,\n 'fl': '*',\n 'qt': 'standard',\n 'sort': 'work_count desc',\n 'wt': 'json',\n **(\n {}\n if q_has_fields\n else {'defType': 'dismax', 'qf': 'name alternate_names'}\n ),\n },\n )\n\n docs = d.get('response', {}).get('docs', [])\n for doc in docs:\n # replace /authors/OL1A with OL1A\n # The template still expects the key to be in the old format\n doc['key'] = doc['key'].split(\"/\")[-1]\n return d\n\n\nclass author_search_json(author_search):\n path = '/search/authors'\n encoding = 'json'\n\n def GET(self):\n i = web.input(q='', offset=0, limit=100)\n offset = safeint(i.offset, 0)\n limit = safeint(i.limit, 100)\n limit = min(1000, limit) # limit limit to 1000.\n\n response = self.get_results(i.q, offset=offset, limit=limit)['response']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response))\n\n\n@public\ndef random_author_search(limit=10):\n \"\"\"\n Returns a dict that contains a random list of authors. 
Amount of authors\n returned is set be the given limit.\n \"\"\"\n letters_and_digits = string.ascii_letters + string.digits\n seed = ''.join(random.choice(letters_and_digits) for _ in range(10))\n\n search_results = run_solr_search(\n solr_select_url,\n {\n 'q': 'type:author',\n 'rows': limit,\n 'sort': f'random_{seed} desc',\n 'wt': 'json',\n },\n )\n\n docs = search_results.get('response', {}).get('docs', [])\n\n assert docs, f\"random_author_search({limit}) returned no docs\"\n assert (\n len(docs) == limit\n ), f\"random_author_search({limit}) returned {len(docs)} docs\"\n\n for doc in docs:\n # replace /authors/OL1A with OL1A\n # The template still expects the key to be in the old format\n doc['key'] = doc['key'].split(\"/\")[-1]\n\n return search_results['response']\n\n\ndef rewrite_list_editions_query(q, page, offset, limit):\n \"\"\"Takes a solr query. If it doesn't contain a /lists/ key, then\n return the query, unchanged, exactly as it entered the\n function. If it does contain a lists key, then use the pagination\n information to fetch the right block of keys from the\n lists_editions API and then feed these editions resulting work\n keys into solr with the form key:(OL123W, OL234W). This way, we\n can use the solr API to fetch list works and render them in\n carousels in the right format.\n \"\"\"\n if '/lists/' in q:\n editions = get_list_editions(q, offset=offset, limit=limit)\n work_ids = [ed.get('works')[0]['key'] for ed in editions]\n q = 'key:(' + ' OR '.join(work_ids) + ')'\n # We've applied the offset to fetching get_list_editions to\n # produce the right set of discrete work IDs. We don't want\n # it applied to paginate our resulting solr query.\n offset = 0\n page = 1\n return q, page, offset, limit\n\n\n@public\ndef work_search(\n query,\n sort=None,\n page=1,\n offset=0,\n limit=100,\n fields='*',\n facet=True,\n spellcheck_count=None,\n):\n \"\"\"\n params:\n query: dict\n sort: str editions|old|new|scans\n \"\"\"\n # Ensure we don't mutate the `query` passed in by reference\n query = copy.deepcopy(query)\n query['wt'] = 'json'\n if sort:\n sort = process_sort(sort)\n\n # deal with special /lists/ key queries\n query['q'], page, offset, limit = rewrite_list_editions_query(\n query['q'], page, offset, limit\n )\n try:\n (reply, solr_select, q_list) = run_solr_query(\n query,\n rows=limit,\n page=page,\n sort=sort,\n offset=offset,\n fields=fields,\n facet=facet,\n spellcheck_count=spellcheck_count,\n )\n response = json.loads(reply)['response'] or ''\n except (ValueError, OSError) as e:\n logger.error(\"Error in processing search API.\")\n response = dict(start=0, numFound=0, docs=[], error=str(e))\n\n # backward compatibility\n response['num_found'] = response['numFound']\n if fields == '*' or 'availability' in fields:\n response['docs'] = add_availability(response['docs'])\n return response\n\n\nclass search_json(delegate.page):\n path = \"/search\"\n encoding = \"json\"\n\n def GET(self):\n i = web.input(\n author_key=[],\n subject_facet=[],\n person_facet=[],\n place_facet=[],\n time_facet=[],\n first_publish_year=[],\n publisher_facet=[],\n language=[],\n public_scan_b=[],\n )\n if 'query' in i:\n query = json.loads(i.query)\n else:\n query = i\n\n sort = query.get('sort', None)\n\n limit = safeint(query.pop(\"limit\", \"100\"), default=100)\n if \"offset\" in query:\n offset = safeint(query.pop(\"offset\", 0), default=0)\n page = None\n else:\n offset = None\n page = safeint(query.pop(\"page\", \"1\"), default=1)\n\n fields = query.pop('fields', 
'*').split(',')\n facet = query.pop('_facet', 'true').lower() in ['true']\n spellcheck_count = safeint(\n query.pop(\"_spellcheck_count\", default_spellcheck_count),\n default=default_spellcheck_count,\n )\n\n # If the query is a /list/ key, create custom list_editions_query\n q = query.get('q', '')\n query['q'], page, offset, limit = rewrite_list_editions_query(\n q, page, offset, limit\n )\n response = work_search(\n query,\n sort=sort,\n page=page,\n offset=offset,\n limit=limit,\n fields=fields,\n facet=facet,\n spellcheck_count=spellcheck_count,\n )\n response['q'] = q\n response['offset'] = offset\n response['docs'] = response['docs']\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(response, indent=4))\n\n\ndef setup():\n from openlibrary.plugins.worksearch import subjects\n\n # subjects module needs read_author_facet and solr_select_url.\n # Importing this module to access them will result in circular import.\n # Setting them like this to avoid circular-import.\n subjects.read_author_facet = read_author_facet\n if hasattr(config, 'plugin_worksearch'):\n subjects.solr_select_url = solr_select_url\n\n subjects.setup()\n\n from openlibrary.plugins.worksearch import languages, publishers\n\n publishers.setup()\n languages.setup()\n\n\nsetup()\n", "path": "openlibrary/plugins/worksearch/code.py" } ]
diff --git a/openlibrary/plugins/worksearch/code.py b/openlibrary/plugins/worksearch/code.py index 7f1991f46bf..8f2f002034d 100644 --- a/openlibrary/plugins/worksearch/code.py +++ b/openlibrary/plugins/worksearch/code.py @@ -116,7 +116,7 @@ } SORTS = { 'editions': 'edition_count desc', - 'old': 'first_publish_year asc', + 'old': 'def(first_publish_year, 9999) asc', 'new': 'first_publish_year desc', 'scans': 'ia_count desc', # Classifications
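The one-line change in the hunk above swaps the plain `first_publish_year asc` sort for Solr's `def()` function query. `def(field, fallback)` evaluates to the field's value when the document has one and to the fallback otherwise, so works with no recorded first publish year are ranked as if published in 9999 and fall to the end of the oldest-first ordering, rather than landing wherever the schema's missing-value rules would put them. A minimal sketch of the resulting `SORTS` entry, trimmed to the keys touched by the hunk (the real dict in the file carries many more sorts):

```python
# Sketch of the oldest-first sort after the change above.
# def(first_publish_year, 9999) is a Solr function query: it returns the
# document's first_publish_year, or 9999 when that field is missing, so
# undated works sort last under an ascending ("oldest first") sort.
SORTS = {
    'editions': 'edition_count desc',
    'old': 'def(first_publish_year, 9999) asc',  # undated works fall to the end
    'new': 'first_publish_year desc',
    'scans': 'ia_count desc',
}
```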
openstates__openstates-scrapers-2384
MN failing since at least 2018-06-15
MN has been failing since 2018-06-15.
Based on automated runs, it appears that MN has not run successfully for 2 days (since 2018-06-15).

```
/opt/openstates/venv-pupa/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
  """)
02:01:53 CRITICAL pupa: Session(s) 91st Legislature, 2019-2020 were reported by Minnesota.get_session_list() but were not found in Minnesota.legislative_sessions or Minnesota.ignored_scraped_sessions.
loaded Open States pupa settings...
mn (scrape, import)
  bills: {}
  committees: {}
  people: {}
  vote_events: {}
```

Visit http://bobsled.openstates.org for more info.
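The CRITICAL line in the log is pupa's session-accounting check: every session string returned by `Minnesota.get_session_list()` must appear either as a `_scraped_name` in `legislative_sessions` or in `ignored_scraped_sessions`. A minimal sketch of the usual remedy, assuming the newly advertised 91st Legislature should simply be acknowledged and skipped for now (the actual patch appears in the diff further below):

```python
# Sketch: acknowledge the session string reported by the revisor site so the
# pupa bookkeeping check passes. Listing it here signals that its omission
# from legislative_sessions is deliberate, not an oversight.
ignored_scraped_sessions = [
    # ... existing historical sessions ...
    '89th Legislature, 2015-2016',
    '91st Legislature, 2019-2020',
]
```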
[ { "content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import MNBillScraper\nfrom .committees import MNCommitteeScraper\nfrom .people import MNPersonScraper\nfrom .vote_events import MNVoteScraper\n# from .events import MNEventScraper\n\n\"\"\"\nMinnesota legislative data can be found at the Office of the Revisor\nof Statutes:\nhttps://www.revisor.mn.gov/\n\nVotes:\nThere are not detailed vote data for Senate votes, simply yes and no counts.\nBill pages have vote counts and links to House details, so it makes more\nsense to get vote data from the bill pages.\n\"\"\"\n\n\nclass Minnesota(Jurisdiction):\n division_id = \"ocd-division/country:us/state:mn\"\n classification = \"government\"\n name = \"Minnesota\"\n url = \"http://state.mn.us/\"\n scrapers = {\n \"bills\": MNBillScraper,\n \"committees\": MNCommitteeScraper,\n \"people\": MNPersonScraper,\n \"vote_events\": MNVoteScraper,\n # \"events\": MNEventScraper,\n }\n legislative_sessions = [\n {\n '_scraped_name': '86th Legislature, 2009-2010',\n 'classification': 'primary',\n 'identifier': '2009-2010',\n 'name': '2009-2010 Regular Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 1st Special Session',\n 'classification': 'special',\n 'identifier': '2010 1st Special Session',\n 'name': '2010, 1st Special Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 2nd Special Session',\n 'classification': 'special',\n 'identifier': '2010 2nd Special Session',\n 'name': '2010, 2nd Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011-2012',\n 'classification': 'primary',\n 'identifier': '2011-2012',\n 'name': '2011-2012 Regular Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011 1st Special Session',\n 'classification': 'special',\n 'identifier': '2011s1',\n 'name': '2011, 1st Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2012 1st Special Session',\n 'classification': 'special',\n 'identifier': '2012s1',\n 'name': '2012, 1st Special Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013-2014',\n 'classification': 'primary',\n 'identifier': '2013-2014',\n 'name': '2013-2014 Regular Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013 1st Special Session',\n 'classification': 'special',\n 'identifier': '2013s1',\n 'name': '2013, 1st Special Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015-2016',\n 'classification': 'primary',\n 'identifier': '2015-2016',\n 'name': '2015-2016 Regular Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015 1st Special Session',\n 'classification': 'special',\n 'identifier': '2015s1',\n 'name': '2015, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017 1st Special Session',\n 'classification': 'special',\n 'identifier': '2017s1',\n 'name': '2017, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017-2018',\n 'classification': 'primary',\n 'identifier': '2017-2018',\n 'name': '2017-2018 Regular Session',\n 'start_date': '2017-01-03',\n 'end_date': '2018-05-21'\n },\n ]\n ignored_scraped_sessions = [\n '85th Legislature, 2007-2008',\n '85th Legislature, 2007 1st Special Session',\n '84th Legislature, 2005-2006',\n '84th Legislature, 2005 1st Special Session',\n '83rd Legislature, 2003-2004',\n '83rd Legislature, 2003 1st Special Session',\n '82nd Legislature, 2001-2002',\n '82nd Legislature, 2002 1st Special Session',\n '82nd Legislature, 2001 1st Special Session',\n '81st Legislature, 1999-2000',\n '80th Legislature, 
1997-1998',\n '80th Legislature, 1998 1st Special Session',\n '80th Legislature, 1997 3rd Special Session',\n '80th Legislature, 1997 2nd Special Session',\n '80th Legislature, 1997 1st Special Session',\n '79th Legislature, 1995-1996',\n '79th Legislature, 1995 1st Special Session',\n '89th Legislature, 2015-2016',\n ]\n\n def get_organizations(self):\n legis = Organization('Minnesota Legislature', classification='legislature')\n\n upper = Organization('Minnesota Senate', classification='upper',\n parent_id=legis._id)\n lower = Organization('Minnesota House of Representatives',\n classification='lower', parent_id=legis._id)\n\n for n in range(1, 68):\n upper.add_post(label=str(n), role='Senator',\n division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))\n lower.add_post(label=str(n) + 'A', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))\n lower.add_post(label=str(n) + 'B', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))\n\n yield Organization('Governor of Minnesota', classification='executive')\n yield legis\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath('https://www.revisor.mn.gov/bills/'\n 'status_search.php?body=House',\n '//select[@name=\"session\"]/option/text()', verify=False)\n", "path": "openstates/mn/__init__.py" } ]
[ { "content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import MNBillScraper\nfrom .committees import MNCommitteeScraper\nfrom .people import MNPersonScraper\nfrom .vote_events import MNVoteScraper\n# from .events import MNEventScraper\n\n\"\"\"\nMinnesota legislative data can be found at the Office of the Revisor\nof Statutes:\nhttps://www.revisor.mn.gov/\n\nVotes:\nThere are not detailed vote data for Senate votes, simply yes and no counts.\nBill pages have vote counts and links to House details, so it makes more\nsense to get vote data from the bill pages.\n\"\"\"\n\n\nclass Minnesota(Jurisdiction):\n division_id = \"ocd-division/country:us/state:mn\"\n classification = \"government\"\n name = \"Minnesota\"\n url = \"http://state.mn.us/\"\n scrapers = {\n \"bills\": MNBillScraper,\n \"committees\": MNCommitteeScraper,\n \"people\": MNPersonScraper,\n \"vote_events\": MNVoteScraper,\n # \"events\": MNEventScraper,\n }\n legislative_sessions = [\n {\n '_scraped_name': '86th Legislature, 2009-2010',\n 'classification': 'primary',\n 'identifier': '2009-2010',\n 'name': '2009-2010 Regular Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 1st Special Session',\n 'classification': 'special',\n 'identifier': '2010 1st Special Session',\n 'name': '2010, 1st Special Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 2nd Special Session',\n 'classification': 'special',\n 'identifier': '2010 2nd Special Session',\n 'name': '2010, 2nd Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011-2012',\n 'classification': 'primary',\n 'identifier': '2011-2012',\n 'name': '2011-2012 Regular Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011 1st Special Session',\n 'classification': 'special',\n 'identifier': '2011s1',\n 'name': '2011, 1st Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2012 1st Special Session',\n 'classification': 'special',\n 'identifier': '2012s1',\n 'name': '2012, 1st Special Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013-2014',\n 'classification': 'primary',\n 'identifier': '2013-2014',\n 'name': '2013-2014 Regular Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013 1st Special Session',\n 'classification': 'special',\n 'identifier': '2013s1',\n 'name': '2013, 1st Special Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015-2016',\n 'classification': 'primary',\n 'identifier': '2015-2016',\n 'name': '2015-2016 Regular Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015 1st Special Session',\n 'classification': 'special',\n 'identifier': '2015s1',\n 'name': '2015, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017 1st Special Session',\n 'classification': 'special',\n 'identifier': '2017s1',\n 'name': '2017, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017-2018',\n 'classification': 'primary',\n 'identifier': '2017-2018',\n 'name': '2017-2018 Regular Session',\n 'start_date': '2017-01-03',\n 'end_date': '2018-05-21'\n },\n ]\n ignored_scraped_sessions = [\n '85th Legislature, 2007-2008',\n '85th Legislature, 2007 1st Special Session',\n '84th Legislature, 2005-2006',\n '84th Legislature, 2005 1st Special Session',\n '83rd Legislature, 2003-2004',\n '83rd Legislature, 2003 1st Special Session',\n '82nd Legislature, 2001-2002',\n '82nd Legislature, 2002 1st Special Session',\n '82nd Legislature, 2001 1st Special Session',\n '81st Legislature, 1999-2000',\n '80th Legislature, 
1997-1998',\n '80th Legislature, 1998 1st Special Session',\n '80th Legislature, 1997 3rd Special Session',\n '80th Legislature, 1997 2nd Special Session',\n '80th Legislature, 1997 1st Special Session',\n '79th Legislature, 1995-1996',\n '79th Legislature, 1995 1st Special Session',\n '89th Legislature, 2015-2016',\n '91st Legislature, 2019-2020',\n ]\n\n def get_organizations(self):\n legis = Organization('Minnesota Legislature', classification='legislature')\n\n upper = Organization('Minnesota Senate', classification='upper',\n parent_id=legis._id)\n lower = Organization('Minnesota House of Representatives',\n classification='lower', parent_id=legis._id)\n\n for n in range(1, 68):\n upper.add_post(label=str(n), role='Senator',\n division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))\n lower.add_post(label=str(n) + 'A', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))\n lower.add_post(label=str(n) + 'B', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))\n\n yield Organization('Governor of Minnesota', classification='executive')\n yield legis\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath('https://www.revisor.mn.gov/bills/'\n 'status_search.php?body=House',\n '//select[@name=\"session\"]/option/text()', verify=False)\n", "path": "openstates/mn/__init__.py" } ]
diff --git a/openstates/mn/__init__.py b/openstates/mn/__init__.py index c6dadd14f7..f5a168769b 100644 --- a/openstates/mn/__init__.py +++ b/openstates/mn/__init__.py @@ -127,6 +127,7 @@ class Minnesota(Jurisdiction): '79th Legislature, 1995-1996', '79th Legislature, 1995 1st Special Session', '89th Legislature, 2015-2016', + '91st Legislature, 2019-2020', ] def get_organizations(self):
mlflow__mlflow-11463
[DOC-FIX] Document that attribute RunInfo.lifecycle_stage is of type LifecycleStage
### Willingness to contribute
No. I cannot contribute a documentation fix at this time.
### URL(s) with the issue
https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.RunInfo.lifecycle_stage
### Description of proposal (what needs changing)
This concerns the [documentation for the RunInfo](https://mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.RunInfo) class. For the `RunInfo.lifecycle_stage` attribute, we should mention that its type is the enum `LifecycleStage`, analogous to the documentation for the `RunInfo.status` attribute.
It should read:
```
property lifecycle_stage[source]
One of the values in mlflow.entities.lifecycle_stage.LifecycleStage describing the lifecycle stage of the run.
```
similar to the existing:
```
property status[source]
One of the values in mlflow.entities.RunStatus describing the status of the run.
```
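Because Sphinx renders the property's docstring verbatim, the requested fix boils down to giving `lifecycle_stage` a docstring modeled on the existing `status` one. A minimal, self-contained sketch of what that could look like; the wording is an assumption patterned on the `status` docstring quoted above, not necessarily the patch that was merged:

```python
# Hypothetical docstring-only sketch; the real RunInfo subclasses
# _MLflowObject and takes many more constructor arguments.
class RunInfo:
    def __init__(self, lifecycle_stage):
        self._lifecycle_stage = lifecycle_stage

    @property
    def lifecycle_stage(self):
        """
        One of the values in
        :py:class:`mlflow.entities.lifecycle_stage.LifecycleStage`
        describing the lifecycle stage of the run.
        """
        return self._lifecycle_stage
```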
[ { "content": "from mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\nfrom mlflow.entities.run_status import RunStatus\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\nfrom mlflow.protos.service_pb2 import RunInfo as ProtoRunInfo\n\n\ndef check_run_is_active(run_info):\n if run_info.lifecycle_stage != LifecycleStage.ACTIVE:\n raise MlflowException(\n f\"The run {run_info.run_id} must be in 'active' lifecycle_stage.\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n\nclass searchable_attribute(property):\n # Wrapper class over property to designate some of the properties as searchable\n # run attributes\n pass\n\n\nclass orderable_attribute(property):\n # Wrapper class over property to designate some of the properties as orderable\n # run attributes\n pass\n\n\nclass RunInfo(_MLflowObject):\n \"\"\"\n Metadata about a run.\n \"\"\"\n\n def __init__(\n self,\n run_uuid,\n experiment_id,\n user_id,\n status,\n start_time,\n end_time,\n lifecycle_stage,\n artifact_uri=None,\n run_id=None,\n run_name=None,\n ):\n if experiment_id is None:\n raise Exception(\"experiment_id cannot be None\")\n if user_id is None:\n raise Exception(\"user_id cannot be None\")\n if status is None:\n raise Exception(\"status cannot be None\")\n if start_time is None:\n raise Exception(\"start_time cannot be None\")\n actual_run_id = run_id or run_uuid\n if actual_run_id is None:\n raise Exception(\"run_id and run_uuid cannot both be None\")\n self._run_uuid = actual_run_id\n self._run_id = actual_run_id\n self._experiment_id = experiment_id\n self._user_id = user_id\n self._status = status\n self._start_time = start_time\n self._end_time = end_time\n self._lifecycle_stage = lifecycle_stage\n self._artifact_uri = artifact_uri\n self._run_name = run_name\n\n def __eq__(self, other):\n if type(other) is type(self):\n # TODO deep equality here?\n return self.__dict__ == other.__dict__\n return False\n\n def _copy_with_overrides(self, status=None, end_time=None, lifecycle_stage=None, run_name=None):\n \"\"\"A copy of the RunInfo with certain attributes modified.\"\"\"\n proto = self.to_proto()\n if status:\n proto.status = status\n if end_time:\n proto.end_time = end_time\n if lifecycle_stage:\n proto.lifecycle_stage = lifecycle_stage\n if run_name:\n proto.run_name = run_name\n return RunInfo.from_proto(proto)\n\n @property\n def run_uuid(self):\n \"\"\"[Deprecated, use run_id instead] String containing run UUID.\"\"\"\n return self._run_uuid\n\n @searchable_attribute\n def run_id(self):\n \"\"\"String containing run id.\"\"\"\n return self._run_id\n\n @property\n def experiment_id(self):\n \"\"\"String ID of the experiment for the current run.\"\"\"\n return self._experiment_id\n\n @searchable_attribute\n def run_name(self):\n \"\"\"String containing run name.\"\"\"\n return self._run_name\n\n def _set_run_name(self, new_name):\n self._run_name = new_name\n\n @searchable_attribute\n def user_id(self):\n \"\"\"String ID of the user who initiated this run.\"\"\"\n return self._user_id\n\n @searchable_attribute\n def status(self):\n \"\"\"\n One of the values in :py:class:`mlflow.entities.RunStatus`\n describing the status of the run.\n \"\"\"\n return self._status\n\n @searchable_attribute\n def start_time(self):\n \"\"\"Start time of the run, in number of milliseconds since the UNIX epoch.\"\"\"\n return self._start_time\n\n @searchable_attribute\n def end_time(self):\n \"\"\"End time of the 
run, in number of milliseconds since the UNIX epoch.\"\"\"\n return self._end_time\n\n @searchable_attribute\n def artifact_uri(self):\n \"\"\"String root artifact URI of the run.\"\"\"\n return self._artifact_uri\n\n @property\n def lifecycle_stage(self):\n return self._lifecycle_stage\n\n def to_proto(self):\n proto = ProtoRunInfo()\n proto.run_uuid = self.run_uuid\n proto.run_id = self.run_id\n if self.run_name is not None:\n proto.run_name = self.run_name\n proto.experiment_id = self.experiment_id\n proto.user_id = self.user_id\n proto.status = RunStatus.from_string(self.status)\n proto.start_time = self.start_time\n if self.end_time:\n proto.end_time = self.end_time\n if self.artifact_uri:\n proto.artifact_uri = self.artifact_uri\n proto.lifecycle_stage = self.lifecycle_stage\n return proto\n\n @classmethod\n def from_proto(cls, proto):\n end_time = proto.end_time\n # The proto2 default scalar value of zero indicates that the run's end time is absent.\n # An absent end time is represented with a NoneType in the `RunInfo` class\n if end_time == 0:\n end_time = None\n return cls(\n run_uuid=proto.run_uuid,\n run_id=proto.run_id,\n run_name=proto.run_name,\n experiment_id=proto.experiment_id,\n user_id=proto.user_id,\n status=RunStatus.to_string(proto.status),\n start_time=proto.start_time,\n end_time=end_time,\n lifecycle_stage=proto.lifecycle_stage,\n artifact_uri=proto.artifact_uri,\n )\n\n @classmethod\n def get_searchable_attributes(cls):\n return sorted(\n [p for p in cls.__dict__ if isinstance(getattr(cls, p), searchable_attribute)]\n )\n\n @classmethod\n def get_orderable_attributes(cls):\n # Note that all searchable attributes are also orderable.\n return sorted(\n [\n p\n for p in cls.__dict__\n if isinstance(getattr(cls, p), (searchable_attribute, orderable_attribute))\n ]\n )\n", "path": "mlflow/entities/run_info.py" } ]
[ { "content": "from mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\nfrom mlflow.entities.run_status import RunStatus\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\nfrom mlflow.protos.service_pb2 import RunInfo as ProtoRunInfo\n\n\ndef check_run_is_active(run_info):\n if run_info.lifecycle_stage != LifecycleStage.ACTIVE:\n raise MlflowException(\n f\"The run {run_info.run_id} must be in 'active' lifecycle_stage.\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n\nclass searchable_attribute(property):\n # Wrapper class over property to designate some of the properties as searchable\n # run attributes\n pass\n\n\nclass orderable_attribute(property):\n # Wrapper class over property to designate some of the properties as orderable\n # run attributes\n pass\n\n\nclass RunInfo(_MLflowObject):\n \"\"\"\n Metadata about a run.\n \"\"\"\n\n def __init__(\n self,\n run_uuid,\n experiment_id,\n user_id,\n status,\n start_time,\n end_time,\n lifecycle_stage,\n artifact_uri=None,\n run_id=None,\n run_name=None,\n ):\n if experiment_id is None:\n raise Exception(\"experiment_id cannot be None\")\n if user_id is None:\n raise Exception(\"user_id cannot be None\")\n if status is None:\n raise Exception(\"status cannot be None\")\n if start_time is None:\n raise Exception(\"start_time cannot be None\")\n actual_run_id = run_id or run_uuid\n if actual_run_id is None:\n raise Exception(\"run_id and run_uuid cannot both be None\")\n self._run_uuid = actual_run_id\n self._run_id = actual_run_id\n self._experiment_id = experiment_id\n self._user_id = user_id\n self._status = status\n self._start_time = start_time\n self._end_time = end_time\n self._lifecycle_stage = lifecycle_stage\n self._artifact_uri = artifact_uri\n self._run_name = run_name\n\n def __eq__(self, other):\n if type(other) is type(self):\n # TODO deep equality here?\n return self.__dict__ == other.__dict__\n return False\n\n def _copy_with_overrides(self, status=None, end_time=None, lifecycle_stage=None, run_name=None):\n \"\"\"A copy of the RunInfo with certain attributes modified.\"\"\"\n proto = self.to_proto()\n if status:\n proto.status = status\n if end_time:\n proto.end_time = end_time\n if lifecycle_stage:\n proto.lifecycle_stage = lifecycle_stage\n if run_name:\n proto.run_name = run_name\n return RunInfo.from_proto(proto)\n\n @property\n def run_uuid(self):\n \"\"\"[Deprecated, use run_id instead] String containing run UUID.\"\"\"\n return self._run_uuid\n\n @searchable_attribute\n def run_id(self):\n \"\"\"String containing run id.\"\"\"\n return self._run_id\n\n @property\n def experiment_id(self):\n \"\"\"String ID of the experiment for the current run.\"\"\"\n return self._experiment_id\n\n @searchable_attribute\n def run_name(self):\n \"\"\"String containing run name.\"\"\"\n return self._run_name\n\n def _set_run_name(self, new_name):\n self._run_name = new_name\n\n @searchable_attribute\n def user_id(self):\n \"\"\"String ID of the user who initiated this run.\"\"\"\n return self._user_id\n\n @searchable_attribute\n def status(self):\n \"\"\"\n One of the values in :py:class:`mlflow.entities.RunStatus`\n describing the status of the run.\n \"\"\"\n return self._status\n\n @searchable_attribute\n def start_time(self):\n \"\"\"Start time of the run, in number of milliseconds since the UNIX epoch.\"\"\"\n return self._start_time\n\n @searchable_attribute\n def end_time(self):\n \"\"\"End time of the 
run, in number of milliseconds since the UNIX epoch.\"\"\"\n return self._end_time\n\n @searchable_attribute\n def artifact_uri(self):\n \"\"\"String root artifact URI of the run.\"\"\"\n return self._artifact_uri\n\n @property\n def lifecycle_stage(self):\n \"\"\"\n One of the values in :py:class:`mlflow.entities.lifecycle_stage.LifecycleStage`\n describing the lifecycle stage of the run.\n \"\"\"\n return self._lifecycle_stage\n\n def to_proto(self):\n proto = ProtoRunInfo()\n proto.run_uuid = self.run_uuid\n proto.run_id = self.run_id\n if self.run_name is not None:\n proto.run_name = self.run_name\n proto.experiment_id = self.experiment_id\n proto.user_id = self.user_id\n proto.status = RunStatus.from_string(self.status)\n proto.start_time = self.start_time\n if self.end_time:\n proto.end_time = self.end_time\n if self.artifact_uri:\n proto.artifact_uri = self.artifact_uri\n proto.lifecycle_stage = self.lifecycle_stage\n return proto\n\n @classmethod\n def from_proto(cls, proto):\n end_time = proto.end_time\n # The proto2 default scalar value of zero indicates that the run's end time is absent.\n # An absent end time is represented with a NoneType in the `RunInfo` class\n if end_time == 0:\n end_time = None\n return cls(\n run_uuid=proto.run_uuid,\n run_id=proto.run_id,\n run_name=proto.run_name,\n experiment_id=proto.experiment_id,\n user_id=proto.user_id,\n status=RunStatus.to_string(proto.status),\n start_time=proto.start_time,\n end_time=end_time,\n lifecycle_stage=proto.lifecycle_stage,\n artifact_uri=proto.artifact_uri,\n )\n\n @classmethod\n def get_searchable_attributes(cls):\n return sorted(\n [p for p in cls.__dict__ if isinstance(getattr(cls, p), searchable_attribute)]\n )\n\n @classmethod\n def get_orderable_attributes(cls):\n # Note that all searchable attributes are also orderable.\n return sorted(\n [\n p\n for p in cls.__dict__\n if isinstance(getattr(cls, p), (searchable_attribute, orderable_attribute))\n ]\n )\n", "path": "mlflow/entities/run_info.py" } ]
diff --git a/mlflow/entities/run_info.py b/mlflow/entities/run_info.py index 1d22e402e754b..5fca3152ae814 100644 --- a/mlflow/entities/run_info.py +++ b/mlflow/entities/run_info.py @@ -138,6 +138,10 @@ def artifact_uri(self): @property def lifecycle_stage(self): + """ + One of the values in :py:class:`mlflow.entities.lifecycle_stage.LifecycleStage` + describing the lifecycle stage of the run. + """ return self._lifecycle_stage def to_proto(self):
ansible__ansible-17707
lookup properties <!--- Verify first that your issue/request is not already reported in GitHub --> ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bug Report ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400) lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400) lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400) config file = /etc/ansible/ansible.cfg configured module search path = Default w/o overrides ``` ##### CONFIGURATION ##### OS / ENVIRONMENT "N/A" ##### SUMMARY This [commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d) breaks `lookup` for `type=properties`. ##### STEPS TO REPRODUCE Just try to read some properties through `lookup`: ``` - name: lookup vars: property_value: "{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}" debug: msg="{{ property_value }}" ``` ##### EXPECTED RESULTS The correct value is read from the property file. ##### ACTUAL RESULTS ``` File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 512, in _read raise MissingSectionHeaderError(fpname, lineno, line) MissingSectionHeaderError: File contains no section headers. file: /config.properties, line: 3 'environment=dev\n' fatal: [localhost]: FAILED! => {"failed": true, "msg": "Unexpected failure during module execution.", "stdout": ""} NO MORE HOSTS LEFT ************************************************************* to retry, use: --limit @test.retry PLAY RECAP ********************************************************************* localhost : ok=1 changed=0 unreachable=0 failed=1 ``` By the way, it would be great to implement 'autofill' properties: ``` host=127.0.0.1 api.host=${host} ``` cc @jctanner
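To make the failure mode concrete, here is a small standalone sketch that re-implements the lookup's `_parse_params` splitting logic for illustration (simplified, not the plugin itself). Because `type` is missing from the recognised keys, `type=properties` gets glued onto the key, `paramvals['type']` silently stays at its default of `"ini"`, the INI code path is taken, and ConfigParser raises `MissingSectionHeaderError` on a sectionless properties file:

```python
def parse_params(term, keys):
    """Split a lookup term into parameters, mirroring the ini lookup's logic."""
    params = {k: '' for k in keys}
    thiskey = 'key'
    for idp, phrase in enumerate(term.split()):
        for k in keys:
            if ('%s=' % k) in phrase:
                thiskey = k
        if idp == 0 or not params[thiskey]:
            params[thiskey] = phrase
        else:
            params[thiskey] += ' ' + phrase
    return [params[k] for k in keys if params[k]]


term = 'some.java.property type=properties file=config.properties'

# Before the fix: 'type' is not a recognised key, so it sticks to the key value.
print(parse_params(term, ['key', 'section', 'file', 're']))
# -> ['some.java.property type=properties', 'file=config.properties']

# After the fix: 'type=properties' is split out, so read_properties() is used.
print(parse_params(term, ['key', 'type', 'section', 'file', 're']))
# -> ['some.java.property', 'type=properties', 'file=config.properties']
```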
[ { "content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom io import StringIO\nimport os\nimport re\n\ntry:\n # python2\n import ConfigParser as configparser\nexcept ImportError:\n # python3\n import configparser\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.module_utils._text import to_bytes, to_text\n\n\ndef _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n\n keys = ['key', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n\n thiskey = 'key'\n for idp,phrase in enumerate(term.split()):\n for k in keys:\n if ('%s=' % k) in phrase:\n thiskey = k\n if idp == 0 or not params[thiskey]:\n params[thiskey] = phrase\n else:\n params[thiskey] += ' ' + phrase\n\n rparams = [params[x] for x in keys if params[x]]\n return rparams\n\n\nclass LookupModule(LookupBase):\n\n def read_properties(self, filename, key, dflt, is_regexp):\n config = StringIO()\n current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')\n\n config.write(u'[java_properties]\\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))\n config.seek(0, os.SEEK_SET)\n self.cp.readfp(config)\n return self.get_value(key, 'java_properties', dflt, is_regexp)\n\n def read_ini(self, filename, key, section, dflt, is_regexp):\n self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))\n return self.get_value(key, section, dflt, is_regexp)\n\n def get_value(self, key, section, dflt, is_regexp):\n # Retrieve all values from a section using a regexp\n if is_regexp:\n return [v for k, v in self.cp.items(section) if re.match(key, k)]\n value = None\n # Retrieve a single value\n try:\n value = self.cp.get(section, key)\n except configparser.NoOptionError:\n return dflt\n return value\n\n def run(self, terms, variables=None, **kwargs):\n\n basedir = self.get_basedir(variables)\n self.basedir = basedir\n self.cp = configparser.ConfigParser()\n\n ret = []\n for term in terms:\n params = _parse_params(term)\n key = params[0]\n\n paramvals = {\n 'file' : 'ansible.ini',\n 're' : False,\n 'default' : None,\n 'section' : \"global\",\n 'type' : \"ini\",\n }\n\n # parameters specified?\n try:\n for param in params[1:]:\n name, value = param.split('=')\n assert(name in paramvals)\n paramvals[name] = value\n except (ValueError, AssertionError) as e:\n raise AnsibleError(e)\n\n path = self.find_file_in_search_path(variables, 'files', paramvals['file'])\n if paramvals['type'] == \"properties\":\n var = self.read_properties(path, key, paramvals['default'], paramvals['re'])\n else:\n var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])\n if var is not None:\n if type(var) is list:\n for v in var:\n 
ret.append(v)\n else:\n ret.append(var)\n return ret\n", "path": "lib/ansible/plugins/lookup/ini.py" } ]
[ { "content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom io import StringIO\nimport os\nimport re\n\ntry:\n # python2\n import ConfigParser as configparser\nexcept ImportError:\n # python3\n import configparser\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.module_utils._text import to_bytes, to_text\n\n\ndef _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n\n keys = ['key', 'type', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n\n thiskey = 'key'\n for idp,phrase in enumerate(term.split()):\n for k in keys:\n if ('%s=' % k) in phrase:\n thiskey = k\n if idp == 0 or not params[thiskey]:\n params[thiskey] = phrase\n else:\n params[thiskey] += ' ' + phrase\n\n rparams = [params[x] for x in keys if params[x]]\n return rparams\n\n\nclass LookupModule(LookupBase):\n\n def read_properties(self, filename, key, dflt, is_regexp):\n config = StringIO()\n current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')\n\n config.write(u'[java_properties]\\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))\n config.seek(0, os.SEEK_SET)\n self.cp.readfp(config)\n return self.get_value(key, 'java_properties', dflt, is_regexp)\n\n def read_ini(self, filename, key, section, dflt, is_regexp):\n self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))\n return self.get_value(key, section, dflt, is_regexp)\n\n def get_value(self, key, section, dflt, is_regexp):\n # Retrieve all values from a section using a regexp\n if is_regexp:\n return [v for k, v in self.cp.items(section) if re.match(key, k)]\n value = None\n # Retrieve a single value\n try:\n value = self.cp.get(section, key)\n except configparser.NoOptionError:\n return dflt\n return value\n\n def run(self, terms, variables=None, **kwargs):\n\n basedir = self.get_basedir(variables)\n self.basedir = basedir\n self.cp = configparser.ConfigParser()\n\n ret = []\n for term in terms:\n params = _parse_params(term)\n key = params[0]\n\n paramvals = {\n 'file' : 'ansible.ini',\n 're' : False,\n 'default' : None,\n 'section' : \"global\",\n 'type' : \"ini\",\n }\n\n # parameters specified?\n try:\n for param in params[1:]:\n name, value = param.split('=')\n assert(name in paramvals)\n paramvals[name] = value\n except (ValueError, AssertionError) as e:\n raise AnsibleError(e)\n\n path = self.find_file_in_search_path(variables, 'files', paramvals['file'])\n if paramvals['type'] == \"properties\":\n var = self.read_properties(path, key, paramvals['default'], paramvals['re'])\n else:\n var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])\n if var is not None:\n if type(var) is list:\n for v in var:\n 
ret.append(v)\n else:\n ret.append(var)\n return ret\n", "path": "lib/ansible/plugins/lookup/ini.py" } ]
diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py index cbe0d13e6d5d83..11900c0d089523 100644 --- a/lib/ansible/plugins/lookup/ini.py +++ b/lib/ansible/plugins/lookup/ini.py @@ -36,7 +36,7 @@ def _parse_params(term): '''Safely split parameter term to preserve spaces''' - keys = ['key', 'section', 'file', 're'] + keys = ['key', 'type', 'section', 'file', 're'] params = {} for k in keys: params[k] = ''
beetbox__beets-535
mpdstats: last_played is documented but not implemented As pointed out [on the mailing list](https://groups.google.com/d/msg/beets-users/VW0pxtCVZG4/sq9gGsNS9zEJ), the mpdstats plugin (paging @pscn and @kljohann) does not seem to set the `last_played` field, even though the field is described in [the plugin's docs](http://beets.readthedocs.org/en/v1.3.2/plugins/mpdstats.html). Grepping in mpdstats.py for "last_played" shows that it doesn't seem to be implemented. We should probably either add it to the plugin or remove it from the docs.
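A minimal sketch of the missing behaviour, assuming a beets `Item`-like object that supports dict-style assignment and `store()`; this mirrors the approach taken in the patch further below, which stamps the attribute when playback starts in `on_play`:

```python
import time

def record_last_played(item):
    """Hypothetical helper: stamp a flexible 'last_played' attribute on an item."""
    if item is None:
        return
    item['last_played'] = int(time.time())  # seconds since the UNIX epoch
    item.store()
```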
[ { "content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nimport logging\nimport mpd\nimport socket\nimport select\nimport time\nimport os\n\nfrom beets import ui\nfrom beets import config\nfrom beets import plugins\nfrom beets import library\nfrom beets.util import displayable_path\n\nlog = logging.getLogger('beets')\n\n# If we lose the connection, how many times do we want to retry and how\n# much time should we wait between retries?\nRETRIES = 10\nRETRY_INTERVAL = 5\n\n\ndef is_url(path):\n \"\"\"Try to determine if the path is an URL.\n \"\"\"\n return path.split('://', 1)[0] in ['http', 'https']\n\n\n# Use the MPDClient internals to get unicode.\n# see http://www.tarmack.eu/code/mpdunicode.py for the general idea\nclass MPDClient(mpd.MPDClient):\n def _write_command(self, command, args=[]):\n args = [unicode(arg).encode('utf-8') for arg in args]\n super(MPDClient, self)._write_command(command, args)\n\n def _read_line(self):\n line = super(MPDClient, self)._read_line()\n if line is not None:\n return line.decode('utf-8')\n return None\n\n\nclass MPDClientWrapper(object):\n def __init__(self):\n self.music_directory = (\n config['mpdstats']['music_directory'].get(unicode))\n\n self.client = MPDClient()\n\n def connect(self):\n \"\"\"Connect to the MPD.\n \"\"\"\n host = config['mpd']['host'].get(unicode)\n port = config['mpd']['port'].get(int)\n\n if host[0] in ['/', '~']:\n host = os.path.expanduser(host)\n\n log.info(u'mpdstats: connecting to {0}:{1}'.format(host, port))\n try:\n self.client.connect(host, port)\n except socket.error as e:\n raise ui.UserError('could not connect to MPD: {0}'.format(e))\n\n password = config['mpd']['password'].get(unicode)\n if password:\n try:\n self.client.password(password)\n except mpd.CommandError as e:\n raise ui.UserError(\n 'could not authenticate to MPD: {0}'.format(e)\n )\n\n def disconnect(self):\n \"\"\"Disconnect from the MPD.\n \"\"\"\n self.client.close()\n self.client.disconnect()\n\n def get(self, command, retries=RETRIES):\n \"\"\"Wrapper for requests to the MPD server. Tries to re-connect if the\n connection was lost (f.ex. during MPD's library refresh).\n \"\"\"\n try:\n return getattr(self.client, command)()\n except (select.error, mpd.ConnectionError) as err:\n log.error(u'mpdstats: {0}'.format(err))\n\n if retries <= 0:\n # if we exited without breaking, we couldn't reconnect in time :(\n raise ui.UserError(u'communication with MPD server failed')\n\n time.sleep(RETRY_INTERVAL)\n\n try:\n self.disconnect()\n except mpd.ConnectionError:\n pass\n\n self.connect()\n return self.get(command, retries=retries - 1)\n\n def playlist(self):\n \"\"\"Return the currently active playlist. 
Prefixes paths with the\n music_directory, to get the absolute path.\n \"\"\"\n result = {}\n for entry in self.get('playlistinfo'):\n if not is_url(entry['file']):\n result[entry['id']] = os.path.join(\n self.music_directory, entry['file'])\n else:\n result[entry['id']] = entry['file']\n return result\n\n def status(self):\n \"\"\"Return the current status of the MPD.\n \"\"\"\n return self.get('status')\n\n def events(self):\n \"\"\"Return list of events. This may block a long time while waiting for\n an answer from MPD.\n \"\"\"\n return self.get('idle')\n\n\nclass MPDStats(object):\n def __init__(self, lib):\n self.lib = lib\n\n self.do_rating = config['mpdstats']['rating'].get(bool)\n self.rating_mix = config['mpdstats']['rating_mix'].get(float)\n self.time_threshold = 10.0 # TODO: maybe add config option?\n\n self.now_playing = None\n self.mpd = MPDClientWrapper()\n\n def rating(self, play_count, skip_count, rating, skipped):\n \"\"\"Calculate a new rating for a song based on play count, skip count,\n old rating and the fact if it was skipped or not.\n \"\"\"\n if skipped:\n rolling = (rating - rating / 2.0)\n else:\n rolling = (rating + (1.0 - rating) / 2.0)\n stable = (play_count + 1.0) / (play_count + skip_count + 2.0)\n return (self.rating_mix * stable\n + (1.0 - self.rating_mix) * rolling)\n\n def get_item(self, path):\n \"\"\"Return the beets item related to path.\n \"\"\"\n query = library.PathQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))\n\n @staticmethod\n def update_item(item, attribute, value=None, increment=None):\n \"\"\"Update the beets item. Set attribute to value or increment the value\n of attribute. If the increment argument is used the value is cast to the\n corresponding type.\n \"\"\"\n if item is None:\n return\n\n if increment is not None:\n item.load()\n value = type(increment)(item.get(attribute, 0)) + increment\n\n if value is not None:\n item[attribute] = value\n item.store()\n\n log.debug(u'mpdstats: updated: {0} = {1} [{2}]'.format(\n attribute,\n item[attribute],\n displayable_path(item.path),\n ))\n\n def update_rating(self, item, skipped):\n \"\"\"Update the rating for a beets item.\n \"\"\"\n item.load()\n rating = self.rating(\n int(item.get('play_count', 0)),\n int(item.get('skip_count', 0)),\n float(item.get('rating', 0.5)),\n skipped)\n\n self.update_item(item, 'rating', rating)\n\n def handle_song_change(self, song):\n \"\"\"Determine if a song was skipped or not and update its attributes.\n To this end the difference between the song's supposed end time\n and the current time is calculated. 
If it's greater than a threshold,\n the song is considered skipped.\n \"\"\"\n diff = abs(song['remaining'] - (time.time() - song['started']))\n\n skipped = diff >= self.time_threshold\n\n if skipped:\n self.handle_skipped(song)\n else:\n self.handle_played(song)\n\n if self.do_rating:\n self.update_rating(song['beets_item'], skipped)\n\n def handle_played(self, song):\n \"\"\"Updates the play count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'play_count', increment=1)\n log.info(u'mpdstats: played {0}'.format(\n displayable_path(song['path'])\n ))\n\n def handle_skipped(self, song):\n \"\"\"Updates the skip count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'skip_count', increment=1)\n log.info(u'mpdstats: skipped {0}'.format(\n displayable_path(song['path'])\n ))\n\n def on_stop(self, status):\n log.info(u'mpdstats: stop')\n self.now_playing = None\n\n def on_pause(self, status):\n log.info(u'mpdstats: pause')\n self.now_playing = None\n\n def on_play(self, status):\n playlist = self.mpd.playlist()\n path = playlist.get(status['songid'])\n\n if not path:\n return\n\n if is_url(path):\n log.info(u'mpdstats: playing stream {0}'.format(\n displayable_path(path)\n ))\n return\n\n played, duration = map(int, status['time'].split(':', 1))\n remaining = duration - played\n\n if self.now_playing and self.now_playing['path'] != path:\n self.handle_song_change(self.now_playing)\n\n log.info(u'mpdstats: playing {0}'.format(\n displayable_path(path)\n ))\n\n self.now_playing = {\n 'started': time.time(),\n 'remaining': remaining,\n 'path': path,\n 'beets_item': self.get_item(path),\n }\n\n def run(self):\n self.mpd.connect()\n events = ['player']\n\n while True:\n if 'player' in events:\n status = self.mpd.status()\n\n handler = getattr(self, 'on_' + status['state'], None)\n\n if handler:\n handler(status)\n else:\n log.debug(u'mpdstats: unhandled status \"{0}\"'.format(status))\n\n events = self.mpd.events()\n\n\nclass MPDStatsPlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(MPDStatsPlugin, self).__init__()\n self.config.add({\n 'music_directory': config['directory'].as_filename(),\n 'rating': True,\n 'rating_mix': 0.75,\n })\n config['mpd'].add({\n 'host': u'localhost',\n 'port': 6600,\n 'password': u'',\n })\n\n def commands(self):\n cmd = ui.Subcommand(\n 'mpdstats',\n help='run a MPD client to gather play statistics')\n cmd.parser.add_option(\n '--host', dest='host', type='string',\n help='set the hostname of the server to connect to')\n cmd.parser.add_option(\n '--port', dest='port', type='int',\n help='set the port of the MPD server to connect to')\n cmd.parser.add_option(\n '--password', dest='password', type='string',\n help='set the password of the MPD server to connect to')\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n\n # Overrides for MPD settings.\n if opts.host:\n config['mpd']['host'] = opts.host.decode('utf8')\n if opts.port:\n config['mpd']['host'] = int(opts.port)\n if opts.password:\n config['mpd']['password'] = opts.password.decode('utf8')\n\n try:\n MPDStats(lib).run()\n except KeyboardInterrupt:\n pass\n\n cmd.func = func\n return [cmd]\n", "path": "beetsplug/mpdstats.py" } ]
[ { "content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nimport logging\nimport mpd\nimport socket\nimport select\nimport time\nimport os\n\nfrom beets import ui\nfrom beets import config\nfrom beets import plugins\nfrom beets import library\nfrom beets.util import displayable_path\n\nlog = logging.getLogger('beets')\n\n# If we lose the connection, how many times do we want to retry and how\n# much time should we wait between retries?\nRETRIES = 10\nRETRY_INTERVAL = 5\n\n\ndef is_url(path):\n \"\"\"Try to determine if the path is an URL.\n \"\"\"\n return path.split('://', 1)[0] in ['http', 'https']\n\n\n# Use the MPDClient internals to get unicode.\n# see http://www.tarmack.eu/code/mpdunicode.py for the general idea\nclass MPDClient(mpd.MPDClient):\n def _write_command(self, command, args=[]):\n args = [unicode(arg).encode('utf-8') for arg in args]\n super(MPDClient, self)._write_command(command, args)\n\n def _read_line(self):\n line = super(MPDClient, self)._read_line()\n if line is not None:\n return line.decode('utf-8')\n return None\n\n\nclass MPDClientWrapper(object):\n def __init__(self):\n self.music_directory = (\n config['mpdstats']['music_directory'].get(unicode))\n\n self.client = MPDClient()\n\n def connect(self):\n \"\"\"Connect to the MPD.\n \"\"\"\n host = config['mpd']['host'].get(unicode)\n port = config['mpd']['port'].get(int)\n\n if host[0] in ['/', '~']:\n host = os.path.expanduser(host)\n\n log.info(u'mpdstats: connecting to {0}:{1}'.format(host, port))\n try:\n self.client.connect(host, port)\n except socket.error as e:\n raise ui.UserError('could not connect to MPD: {0}'.format(e))\n\n password = config['mpd']['password'].get(unicode)\n if password:\n try:\n self.client.password(password)\n except mpd.CommandError as e:\n raise ui.UserError(\n 'could not authenticate to MPD: {0}'.format(e)\n )\n\n def disconnect(self):\n \"\"\"Disconnect from the MPD.\n \"\"\"\n self.client.close()\n self.client.disconnect()\n\n def get(self, command, retries=RETRIES):\n \"\"\"Wrapper for requests to the MPD server. Tries to re-connect if the\n connection was lost (f.ex. during MPD's library refresh).\n \"\"\"\n try:\n return getattr(self.client, command)()\n except (select.error, mpd.ConnectionError) as err:\n log.error(u'mpdstats: {0}'.format(err))\n\n if retries <= 0:\n # if we exited without breaking, we couldn't reconnect in time :(\n raise ui.UserError(u'communication with MPD server failed')\n\n time.sleep(RETRY_INTERVAL)\n\n try:\n self.disconnect()\n except mpd.ConnectionError:\n pass\n\n self.connect()\n return self.get(command, retries=retries - 1)\n\n def playlist(self):\n \"\"\"Return the currently active playlist. 
Prefixes paths with the\n music_directory, to get the absolute path.\n \"\"\"\n result = {}\n for entry in self.get('playlistinfo'):\n if not is_url(entry['file']):\n result[entry['id']] = os.path.join(\n self.music_directory, entry['file'])\n else:\n result[entry['id']] = entry['file']\n return result\n\n def status(self):\n \"\"\"Return the current status of the MPD.\n \"\"\"\n return self.get('status')\n\n def events(self):\n \"\"\"Return list of events. This may block a long time while waiting for\n an answer from MPD.\n \"\"\"\n return self.get('idle')\n\n\nclass MPDStats(object):\n def __init__(self, lib):\n self.lib = lib\n\n self.do_rating = config['mpdstats']['rating'].get(bool)\n self.rating_mix = config['mpdstats']['rating_mix'].get(float)\n self.time_threshold = 10.0 # TODO: maybe add config option?\n\n self.now_playing = None\n self.mpd = MPDClientWrapper()\n\n def rating(self, play_count, skip_count, rating, skipped):\n \"\"\"Calculate a new rating for a song based on play count, skip count,\n old rating and the fact if it was skipped or not.\n \"\"\"\n if skipped:\n rolling = (rating - rating / 2.0)\n else:\n rolling = (rating + (1.0 - rating) / 2.0)\n stable = (play_count + 1.0) / (play_count + skip_count + 2.0)\n return (self.rating_mix * stable\n + (1.0 - self.rating_mix) * rolling)\n\n def get_item(self, path):\n \"\"\"Return the beets item related to path.\n \"\"\"\n query = library.PathQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))\n\n @staticmethod\n def update_item(item, attribute, value=None, increment=None):\n \"\"\"Update the beets item. Set attribute to value or increment the value\n of attribute. If the increment argument is used the value is cast to the\n corresponding type.\n \"\"\"\n if item is None:\n return\n\n if increment is not None:\n item.load()\n value = type(increment)(item.get(attribute, 0)) + increment\n\n if value is not None:\n item[attribute] = value\n item.store()\n\n log.debug(u'mpdstats: updated: {0} = {1} [{2}]'.format(\n attribute,\n item[attribute],\n displayable_path(item.path),\n ))\n\n def update_rating(self, item, skipped):\n \"\"\"Update the rating for a beets item.\n \"\"\"\n item.load()\n rating = self.rating(\n int(item.get('play_count', 0)),\n int(item.get('skip_count', 0)),\n float(item.get('rating', 0.5)),\n skipped)\n\n self.update_item(item, 'rating', rating)\n\n def handle_song_change(self, song):\n \"\"\"Determine if a song was skipped or not and update its attributes.\n To this end the difference between the song's supposed end time\n and the current time is calculated. 
If it's greater than a threshold,\n the song is considered skipped.\n \"\"\"\n diff = abs(song['remaining'] - (time.time() - song['started']))\n\n skipped = diff >= self.time_threshold\n\n if skipped:\n self.handle_skipped(song)\n else:\n self.handle_played(song)\n\n if self.do_rating:\n self.update_rating(song['beets_item'], skipped)\n\n def handle_played(self, song):\n \"\"\"Updates the play count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'play_count', increment=1)\n log.info(u'mpdstats: played {0}'.format(\n displayable_path(song['path'])\n ))\n\n def handle_skipped(self, song):\n \"\"\"Updates the skip count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'skip_count', increment=1)\n log.info(u'mpdstats: skipped {0}'.format(\n displayable_path(song['path'])\n ))\n\n def on_stop(self, status):\n log.info(u'mpdstats: stop')\n self.now_playing = None\n\n def on_pause(self, status):\n log.info(u'mpdstats: pause')\n self.now_playing = None\n\n def on_play(self, status):\n playlist = self.mpd.playlist()\n path = playlist.get(status['songid'])\n\n if not path:\n return\n\n if is_url(path):\n log.info(u'mpdstats: playing stream {0}'.format(\n displayable_path(path)\n ))\n return\n\n played, duration = map(int, status['time'].split(':', 1))\n remaining = duration - played\n\n if self.now_playing and self.now_playing['path'] != path:\n self.handle_song_change(self.now_playing)\n\n log.info(u'mpdstats: playing {0}'.format(\n displayable_path(path)\n ))\n\n self.now_playing = {\n 'started': time.time(),\n 'remaining': remaining,\n 'path': path,\n 'beets_item': self.get_item(path),\n }\n\n self.update_item(self.now_playing['beets_item'],\n 'last_played', value=int(time.time()))\n\n def run(self):\n self.mpd.connect()\n events = ['player']\n\n while True:\n if 'player' in events:\n status = self.mpd.status()\n\n handler = getattr(self, 'on_' + status['state'], None)\n\n if handler:\n handler(status)\n else:\n log.debug(u'mpdstats: unhandled status \"{0}\"'.format(status))\n\n events = self.mpd.events()\n\n\nclass MPDStatsPlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(MPDStatsPlugin, self).__init__()\n self.config.add({\n 'music_directory': config['directory'].as_filename(),\n 'rating': True,\n 'rating_mix': 0.75,\n })\n config['mpd'].add({\n 'host': u'localhost',\n 'port': 6600,\n 'password': u'',\n })\n\n def commands(self):\n cmd = ui.Subcommand(\n 'mpdstats',\n help='run a MPD client to gather play statistics')\n cmd.parser.add_option(\n '--host', dest='host', type='string',\n help='set the hostname of the server to connect to')\n cmd.parser.add_option(\n '--port', dest='port', type='int',\n help='set the port of the MPD server to connect to')\n cmd.parser.add_option(\n '--password', dest='password', type='string',\n help='set the password of the MPD server to connect to')\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n\n # Overrides for MPD settings.\n if opts.host:\n config['mpd']['host'] = opts.host.decode('utf8')\n if opts.port:\n config['mpd']['host'] = int(opts.port)\n if opts.password:\n config['mpd']['password'] = opts.password.decode('utf8')\n\n try:\n MPDStats(lib).run()\n except KeyboardInterrupt:\n pass\n\n cmd.func = func\n return [cmd]\n", "path": "beetsplug/mpdstats.py" } ]
diff --git a/beetsplug/mpdstats.py b/beetsplug/mpdstats.py index e2f990f447..04355fb31e 100644 --- a/beetsplug/mpdstats.py +++ b/beetsplug/mpdstats.py @@ -281,6 +281,9 @@ def on_play(self, status): 'beets_item': self.get_item(path), } + self.update_item(self.now_playing['beets_item'], + 'last_played', value=int(time.time())) + def run(self): self.mpd.connect() events = ['player']
spotify__luigi-2679
Is there a reason python-dateutil is pinned to v2.7.5? In this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5; is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and it's causing `ContextualVersionConflict` errors for us.
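The relaxed requirement in the patch below keeps the known-good floor but allows newer 2.x releases; a minimal sketch of that specifier:

```python
install_requires = [
    # Accept any dateutil 2.x from 2.7.5 onwards instead of an exact pin,
    # so environments that already ship 2.8.0 do not hit version conflicts.
    'python-dateutil>=2.7.5,<3',
]
```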
[ { "content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\ndef get_static_files(path):\n return [os.path.join(dirpath.replace(\"luigi/\", \"\"), ext)\n for (dirpath, dirnames, filenames) in os.walk(path)\n for ext in [\"*.html\", \"*.js\", \"*.css\", \"*.png\",\n \"*.eot\", \"*.svg\", \"*.ttf\", \"*.woff\", \"*.woff2\"]]\n\n\nluigi_package_data = sum(map(get_static_files, [\"luigi/static\", \"luigi/templates\"]), [])\n\nreadme_note = \"\"\"\\\n.. note::\n\n For the latest source, discussion, etc, please visit the\n `GitHub repository <https://github.com/spotify/luigi>`_\\n\\n\n\"\"\"\n\nwith open('README.rst') as fobj:\n long_description = readme_note + fobj.read()\n\ninstall_requires = [\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n 'python-dateutil==2.7.5',\n]\n\n# Note: To support older versions of setuptools, we're explicitly not\n# using conditional syntax (i.e. 'enum34>1.1.0;python_version<\"3.4\"').\n# This syntax is a problem for setuptools as recent as `20.1.1`,\n# published Feb 16, 2016.\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34>1.1.0')\n\nif os.environ.get('READTHEDOCS', None) == 'True':\n # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla\n install_requires.append('sqlalchemy')\n # readthedocs don't like python-daemon, see #1342\n install_requires.remove('python-daemon<2.2.0')\n install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py\n\nsetup(\n name='luigi',\n version='2.8.3',\n description='Workflow mgmgt + task scheduling + dependency resolution',\n long_description=long_description,\n author='The Luigi Authors',\n url='https://github.com/spotify/luigi',\n license='Apache License 2.0',\n packages=[\n 'luigi',\n 'luigi.configuration',\n 'luigi.contrib',\n 'luigi.contrib.hdfs',\n 'luigi.tools'\n ],\n package_data={\n 'luigi': luigi_package_data\n },\n entry_points={\n 'console_scripts': [\n 'luigi = luigi.cmdline:luigi_run',\n 'luigid = luigi.cmdline:luigid',\n 'luigi-grep = luigi.tools.luigi_grep:main',\n 'luigi-deps = luigi.tools.deps:main',\n 'luigi-deps-tree = luigi.tools.deps_tree:main'\n ]\n },\n install_requires=install_requires,\n extras_require={\n 'toml': ['toml<2.0.0'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Monitoring',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\ndef get_static_files(path):\n return [os.path.join(dirpath.replace(\"luigi/\", \"\"), ext)\n for (dirpath, dirnames, filenames) in os.walk(path)\n for ext in [\"*.html\", \"*.js\", \"*.css\", \"*.png\",\n \"*.eot\", \"*.svg\", \"*.ttf\", \"*.woff\", \"*.woff2\"]]\n\n\nluigi_package_data = sum(map(get_static_files, [\"luigi/static\", \"luigi/templates\"]), [])\n\nreadme_note = \"\"\"\\\n.. note::\n\n For the latest source, discussion, etc, please visit the\n `GitHub repository <https://github.com/spotify/luigi>`_\\n\\n\n\"\"\"\n\nwith open('README.rst') as fobj:\n long_description = readme_note + fobj.read()\n\ninstall_requires = [\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n 'python-dateutil>=2.7.5,<3',\n]\n\n# Note: To support older versions of setuptools, we're explicitly not\n# using conditional syntax (i.e. 'enum34>1.1.0;python_version<\"3.4\"').\n# This syntax is a problem for setuptools as recent as `20.1.1`,\n# published Feb 16, 2016.\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34>1.1.0')\n\nif os.environ.get('READTHEDOCS', None) == 'True':\n # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla\n install_requires.append('sqlalchemy')\n # readthedocs don't like python-daemon, see #1342\n install_requires.remove('python-daemon<2.2.0')\n install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py\n\nsetup(\n name='luigi',\n version='2.8.3',\n description='Workflow mgmgt + task scheduling + dependency resolution',\n long_description=long_description,\n author='The Luigi Authors',\n url='https://github.com/spotify/luigi',\n license='Apache License 2.0',\n packages=[\n 'luigi',\n 'luigi.configuration',\n 'luigi.contrib',\n 'luigi.contrib.hdfs',\n 'luigi.tools'\n ],\n package_data={\n 'luigi': luigi_package_data\n },\n entry_points={\n 'console_scripts': [\n 'luigi = luigi.cmdline:luigi_run',\n 'luigid = luigi.cmdline:luigid',\n 'luigi-grep = luigi.tools.luigi_grep:main',\n 'luigi-deps = luigi.tools.deps:main',\n 'luigi-deps-tree = luigi.tools.deps_tree:main'\n ]\n },\n install_requires=install_requires,\n extras_require={\n 'toml': ['toml<2.0.0'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Monitoring',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 03a22f485c..75b7fe13bc 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def get_static_files(path): 'tornado>=4.0,<5', # https://pagure.io/python-daemon/issue/18 'python-daemon<2.2.0', - 'python-dateutil==2.7.5', + 'python-dateutil>=2.7.5,<3', ] # Note: To support older versions of setuptools, we're explicitly not
dask__distributed-1170
self.workers.remove(w) fails trying to remove worker. Probably a nit, but this code: ```python import dask.array as da from distributed import (Client, LocalCluster) import numpy as np cluster = LocalCluster() client = Client(cluster.scheduler_address) ones = da.ones(shape=(1000,1000), chunks=(100,100), dtype=np.float64) A = client.persist(ones) print A f = client.compute(A) print f.result() client.shutdown() cluster.close() ``` fails in the following way: ```bash dask.array<wrapped, shape=(1000, 1000), dtype=float64, chunksize=(100, 100)> [[ 1. 1. 1. ..., 1. 1. 1.] [ 1. 1. 1. ..., 1. 1. 1.] [ 1. 1. 1. ..., 1. 1. 1.] ..., [ 1. 1. 1. ..., 1. 1. 1.] [ 1. 1. 1. ..., 1. 1. 1.] [ 1. 1. 1. ..., 1. 1. 1.]] tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f24c8> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a012af8> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a012db8> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in 
_stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b12d158> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0a021e68> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b09f70368> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f28e8> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File 
"/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list tornado.application - ERROR - Exception in callback <functools.partial object at 0x7f7b0b0f2d08> Traceback (most recent call last): File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 605, in _run_callback ret = callback() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/ioloop.py", line 626, in _discard_future_result future.result() File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/concurrent.py", line 238, in result raise_exc_info(self._exc_info) File "/home/sperkins/venv/mb/local/lib/python2.7/site-packages/tornado/gen.py", line 1069, in run yielded = self.gen.send(value) File "/home/sperkins/work/ska/code/distributed/distributed/deploy/local.py", line 206, in _stop_worker self.workers.remove(w) ValueError: list.remove(x): x not in list ```
[ { "content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport logging\nimport math\nfrom threading import Thread\nfrom time import sleep\nimport warnings\nimport weakref\n\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\n\nfrom ..core import CommClosedError\nfrom ..utils import sync, ignoring, All, silence_logging\nfrom ..nanny import Nanny\nfrom ..scheduler import Scheduler\nfrom ..worker import Worker, _ncores\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalCluster(object):\n \"\"\" Create local Scheduler and Workers\n\n This creates a \"cluster\" of a scheduler and workers running on the local\n machine.\n\n Parameters\n ----------\n n_workers: int\n Number of workers to start\n processes: bool\n Whether to use processes (True) or threads (False). Defaults to True\n threads_per_worker: int\n Number of threads per each worker\n scheduler_port: int\n Port of the scheduler. 8786 by default, use 0 to choose a random port\n silence_logs: logging level\n Level of logs to print out to stdout. ``logging.CRITICAL`` by default.\n Use a falsey value like False or None for no change.\n ip: string\n IP address on which the scheduler will listen, defaults to only localhost\n kwargs: dict\n Extra worker arguments, will be passed to the Worker constructor.\n\n Examples\n --------\n >>> c = LocalCluster() # Create a local cluster with as many workers as cores # doctest: +SKIP\n >>> c # doctest: +SKIP\n LocalCluster(\"127.0.0.1:8786\", workers=8, ncores=8)\n\n >>> c = Client(c) # connect to local cluster # doctest: +SKIP\n\n Add a new worker to the cluster\n >>> w = c.start_worker(ncores=2) # doctest: +SKIP\n\n Shut down the extra worker\n >>> c.remove_worker(w) # doctest: +SKIP\n \"\"\"\n def __init__(self, n_workers=None, threads_per_worker=None, processes=True,\n loop=None, start=True, ip=None, scheduler_port=0,\n silence_logs=logging.CRITICAL, diagnostics_port=8787,\n services={}, worker_services={}, nanny=None, **worker_kwargs):\n if nanny is not None:\n warnings.warning(\"nanny has been deprecated, used processes=\")\n processes = nanny\n self.status = None\n self.processes = processes\n self.silence_logs = silence_logs\n if silence_logs:\n silence_logging(level=silence_logs)\n if n_workers is None and threads_per_worker is None:\n if processes:\n n_workers = _ncores\n threads_per_worker = 1\n else:\n n_workers = 1\n threads_per_worker = _ncores\n if n_workers is None and threads_per_worker is not None:\n n_workers = max(1, _ncores // threads_per_worker)\n if n_workers and threads_per_worker is None:\n # Overcommit threads per worker, rather than undercommit\n threads_per_worker = max(1, int(math.ceil(_ncores / n_workers)))\n\n self.loop = loop or IOLoop()\n if start and not self.loop._running:\n self._thread = Thread(target=self.loop.start,\n name=\"LocalCluster loop\")\n self._thread.daemon = True\n self._thread.start()\n while not self.loop._running:\n sleep(0.001)\n\n if diagnostics_port is not None:\n try:\n from distributed.bokeh.scheduler import BokehScheduler\n from distributed.bokeh.worker import BokehWorker\n except ImportError:\n logger.debug(\"To start diagnostics web server please install Bokeh\")\n else:\n services[('bokeh', diagnostics_port)] = BokehScheduler\n worker_services[('bokeh', 0)] = BokehWorker\n\n self.scheduler = Scheduler(loop=self.loop,\n services=services)\n self.scheduler_port = scheduler_port\n\n self.workers = []\n self.n_workers = n_workers\n self.threads_per_worker = threads_per_worker\n 
self.worker_services = worker_services\n self.worker_kwargs = worker_kwargs\n\n if start:\n sync(self.loop, self._start, ip)\n\n clusters_to_close.add(self)\n\n def __str__(self):\n return ('LocalCluster(%r, workers=%d, ncores=%d)' %\n (self.scheduler_address, len(self.workers),\n sum(w.ncores for w in self.workers))\n )\n\n __repr__ = __str__\n\n @gen.coroutine\n def _start(self, ip=None):\n \"\"\"\n Start all cluster services.\n Wait on this if you passed `start=False` to the LocalCluster\n constructor.\n \"\"\"\n if self.status == 'running':\n return\n if ip is None and not self.scheduler_port and not self.processes:\n # Use inproc transport for optimization\n scheduler_address = 'inproc://'\n else:\n if ip is None:\n ip = '127.0.0.1'\n scheduler_address = (ip, self.scheduler_port)\n self.scheduler.start(scheduler_address)\n\n yield self._start_all_workers(\n self.n_workers, ncores=self.threads_per_worker,\n services=self.worker_services, **self.worker_kwargs)\n\n self.status = 'running'\n\n @gen.coroutine\n def _start_all_workers(self, n_workers, **kwargs):\n yield [self._start_worker(**kwargs) for i in range(n_workers)]\n\n @gen.coroutine\n def _start_worker(self, port=0, processes=None, death_timeout=60, **kwargs):\n if processes is not None:\n raise ValueError(\"overriding `processes` for individual workers \"\n \"in a LocalCluster is not supported anymore\")\n if port:\n raise ValueError(\"overriding `port` for individual workers \"\n \"in a LocalCluster is not supported anymore\")\n if self.processes:\n W = Nanny\n kwargs['quiet'] = True\n else:\n W = Worker\n\n w = W(self.scheduler.address, loop=self.loop,\n death_timeout=death_timeout,\n silence_logs=self.silence_logs, **kwargs)\n yield w._start()\n\n self.workers.append(w)\n\n while w.worker_address not in self.scheduler.worker_info:\n yield gen.sleep(0.01)\n\n raise gen.Return(w)\n\n def start_worker(self, ncores=0, **kwargs):\n \"\"\" Add a new worker to the running cluster\n\n Parameters\n ----------\n port: int (optional)\n Port on which to serve the worker, defaults to 0 or random\n ncores: int (optional)\n Number of threads to use. Defaults to number of logical cores\n\n Examples\n --------\n >>> c = LocalCluster() # doctest: +SKIP\n >>> c.start_worker(ncores=2) # doctest: +SKIP\n\n Returns\n -------\n The created Worker or Nanny object. 
Can be discarded.\n \"\"\"\n return sync(self.loop, self._start_worker, ncores=ncores, **kwargs)\n\n @gen.coroutine\n def _stop_worker(self, w):\n yield w._close()\n self.workers.remove(w)\n\n def stop_worker(self, w):\n \"\"\" Stop a running worker\n\n Examples\n --------\n >>> c = LocalCluster() # doctest: +SKIP\n >>> w = c.start_worker(ncores=2) # doctest: +SKIP\n >>> c.stop_worker(w) # doctest: +SKIP\n \"\"\"\n sync(self.loop, self._stop_worker, w)\n\n @gen.coroutine\n def _close(self):\n if self.status == 'closed':\n return\n\n with ignoring(gen.TimeoutError, CommClosedError, OSError):\n yield All([w._close() for w in self.workers])\n with ignoring(gen.TimeoutError, CommClosedError, OSError):\n yield self.scheduler.close(fast=True)\n del self.workers[:]\n self.status = 'closed'\n\n def close(self):\n \"\"\" Close the cluster \"\"\"\n if self.status == 'closed':\n return\n\n for w in self.workers:\n self.loop.add_callback(self._stop_worker, w)\n for i in range(10):\n if not self.workers:\n break\n else:\n sleep(0.01)\n if self.loop._running:\n sync(self.loop, self._close)\n if hasattr(self, '_thread'):\n sync(self.loop, self.loop.stop)\n self._thread.join(timeout=1)\n self.loop.close()\n del self._thread\n\n @gen.coroutine\n def scale_up(self, n, **kwargs):\n \"\"\" Bring the total count of workers up to ``n``\n\n This function/coroutine should bring the total number of workers up to\n the number ``n``.\n\n This can be implemented either as a function or as a Tornado coroutine.\n \"\"\"\n yield [self._start_worker(**kwargs)\n for i in range(n - len(self.workers))]\n\n @gen.coroutine\n def scale_down(self, workers):\n \"\"\" Remove ``workers`` from the cluster\n\n Given a list of worker addresses this function should remove those\n workers from the cluster. This may require tracking which jobs are\n associated to which worker address.\n\n This can be implemented either as a function or as a Tornado coroutine.\n \"\"\"\n workers = set(workers)\n yield [self._stop_worker(w)\n for w in self.workers\n if w.worker_address in workers]\n while workers & set(self.workers):\n yield gen.sleep(0.01)\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n @property\n def scheduler_address(self):\n try:\n return self.scheduler.address\n except ValueError:\n return '<unstarted>'\n\n\nclusters_to_close = weakref.WeakSet()\n\n\[email protected]\ndef close_clusters():\n for cluster in clusters_to_close:\n cluster.close()\n", "path": "distributed/deploy/local.py" } ]
[ { "content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport logging\nimport math\nfrom threading import Thread\nfrom time import sleep\nimport warnings\nimport weakref\n\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop\n\nfrom ..core import CommClosedError\nfrom ..utils import sync, ignoring, All, silence_logging\nfrom ..nanny import Nanny\nfrom ..scheduler import Scheduler\nfrom ..worker import Worker, _ncores\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalCluster(object):\n \"\"\" Create local Scheduler and Workers\n\n This creates a \"cluster\" of a scheduler and workers running on the local\n machine.\n\n Parameters\n ----------\n n_workers: int\n Number of workers to start\n processes: bool\n Whether to use processes (True) or threads (False). Defaults to True\n threads_per_worker: int\n Number of threads per each worker\n scheduler_port: int\n Port of the scheduler. 8786 by default, use 0 to choose a random port\n silence_logs: logging level\n Level of logs to print out to stdout. ``logging.CRITICAL`` by default.\n Use a falsey value like False or None for no change.\n ip: string\n IP address on which the scheduler will listen, defaults to only localhost\n kwargs: dict\n Extra worker arguments, will be passed to the Worker constructor.\n\n Examples\n --------\n >>> c = LocalCluster() # Create a local cluster with as many workers as cores # doctest: +SKIP\n >>> c # doctest: +SKIP\n LocalCluster(\"127.0.0.1:8786\", workers=8, ncores=8)\n\n >>> c = Client(c) # connect to local cluster # doctest: +SKIP\n\n Add a new worker to the cluster\n >>> w = c.start_worker(ncores=2) # doctest: +SKIP\n\n Shut down the extra worker\n >>> c.remove_worker(w) # doctest: +SKIP\n \"\"\"\n def __init__(self, n_workers=None, threads_per_worker=None, processes=True,\n loop=None, start=True, ip=None, scheduler_port=0,\n silence_logs=logging.CRITICAL, diagnostics_port=8787,\n services={}, worker_services={}, nanny=None, **worker_kwargs):\n if nanny is not None:\n warnings.warning(\"nanny has been deprecated, used processes=\")\n processes = nanny\n self.status = None\n self.processes = processes\n self.silence_logs = silence_logs\n if silence_logs:\n silence_logging(level=silence_logs)\n if n_workers is None and threads_per_worker is None:\n if processes:\n n_workers = _ncores\n threads_per_worker = 1\n else:\n n_workers = 1\n threads_per_worker = _ncores\n if n_workers is None and threads_per_worker is not None:\n n_workers = max(1, _ncores // threads_per_worker)\n if n_workers and threads_per_worker is None:\n # Overcommit threads per worker, rather than undercommit\n threads_per_worker = max(1, int(math.ceil(_ncores / n_workers)))\n\n self.loop = loop or IOLoop()\n if start and not self.loop._running:\n self._thread = Thread(target=self.loop.start,\n name=\"LocalCluster loop\")\n self._thread.daemon = True\n self._thread.start()\n while not self.loop._running:\n sleep(0.001)\n\n if diagnostics_port is not None:\n try:\n from distributed.bokeh.scheduler import BokehScheduler\n from distributed.bokeh.worker import BokehWorker\n except ImportError:\n logger.debug(\"To start diagnostics web server please install Bokeh\")\n else:\n services[('bokeh', diagnostics_port)] = BokehScheduler\n worker_services[('bokeh', 0)] = BokehWorker\n\n self.scheduler = Scheduler(loop=self.loop,\n services=services)\n self.scheduler_port = scheduler_port\n\n self.workers = []\n self.n_workers = n_workers\n self.threads_per_worker = threads_per_worker\n 
self.worker_services = worker_services\n self.worker_kwargs = worker_kwargs\n\n if start:\n sync(self.loop, self._start, ip)\n\n clusters_to_close.add(self)\n\n def __str__(self):\n return ('LocalCluster(%r, workers=%d, ncores=%d)' %\n (self.scheduler_address, len(self.workers),\n sum(w.ncores for w in self.workers))\n )\n\n __repr__ = __str__\n\n @gen.coroutine\n def _start(self, ip=None):\n \"\"\"\n Start all cluster services.\n Wait on this if you passed `start=False` to the LocalCluster\n constructor.\n \"\"\"\n if self.status == 'running':\n return\n if ip is None and not self.scheduler_port and not self.processes:\n # Use inproc transport for optimization\n scheduler_address = 'inproc://'\n else:\n if ip is None:\n ip = '127.0.0.1'\n scheduler_address = (ip, self.scheduler_port)\n self.scheduler.start(scheduler_address)\n\n yield self._start_all_workers(\n self.n_workers, ncores=self.threads_per_worker,\n services=self.worker_services, **self.worker_kwargs)\n\n self.status = 'running'\n\n @gen.coroutine\n def _start_all_workers(self, n_workers, **kwargs):\n yield [self._start_worker(**kwargs) for i in range(n_workers)]\n\n @gen.coroutine\n def _start_worker(self, port=0, processes=None, death_timeout=60, **kwargs):\n if processes is not None:\n raise ValueError(\"overriding `processes` for individual workers \"\n \"in a LocalCluster is not supported anymore\")\n if port:\n raise ValueError(\"overriding `port` for individual workers \"\n \"in a LocalCluster is not supported anymore\")\n if self.processes:\n W = Nanny\n kwargs['quiet'] = True\n else:\n W = Worker\n\n w = W(self.scheduler.address, loop=self.loop,\n death_timeout=death_timeout,\n silence_logs=self.silence_logs, **kwargs)\n yield w._start()\n\n self.workers.append(w)\n\n while w.worker_address not in self.scheduler.worker_info:\n yield gen.sleep(0.01)\n\n raise gen.Return(w)\n\n def start_worker(self, ncores=0, **kwargs):\n \"\"\" Add a new worker to the running cluster\n\n Parameters\n ----------\n port: int (optional)\n Port on which to serve the worker, defaults to 0 or random\n ncores: int (optional)\n Number of threads to use. Defaults to number of logical cores\n\n Examples\n --------\n >>> c = LocalCluster() # doctest: +SKIP\n >>> c.start_worker(ncores=2) # doctest: +SKIP\n\n Returns\n -------\n The created Worker or Nanny object. 
Can be discarded.\n \"\"\"\n return sync(self.loop, self._start_worker, ncores=ncores, **kwargs)\n\n @gen.coroutine\n def _stop_worker(self, w):\n yield w._close()\n if w in self.workers:\n self.workers.remove(w)\n\n def stop_worker(self, w):\n \"\"\" Stop a running worker\n\n Examples\n --------\n >>> c = LocalCluster() # doctest: +SKIP\n >>> w = c.start_worker(ncores=2) # doctest: +SKIP\n >>> c.stop_worker(w) # doctest: +SKIP\n \"\"\"\n sync(self.loop, self._stop_worker, w)\n\n @gen.coroutine\n def _close(self):\n if self.status == 'closed':\n return\n\n with ignoring(gen.TimeoutError, CommClosedError, OSError):\n yield All([w._close() for w in self.workers])\n with ignoring(gen.TimeoutError, CommClosedError, OSError):\n yield self.scheduler.close(fast=True)\n del self.workers[:]\n self.status = 'closed'\n\n def close(self):\n \"\"\" Close the cluster \"\"\"\n if self.status == 'closed':\n return\n\n for w in self.workers:\n self.loop.add_callback(self._stop_worker, w)\n for i in range(10):\n if not self.workers:\n break\n else:\n sleep(0.01)\n if self.loop._running:\n sync(self.loop, self._close)\n if hasattr(self, '_thread'):\n sync(self.loop, self.loop.stop)\n self._thread.join(timeout=1)\n self.loop.close()\n del self._thread\n\n @gen.coroutine\n def scale_up(self, n, **kwargs):\n \"\"\" Bring the total count of workers up to ``n``\n\n This function/coroutine should bring the total number of workers up to\n the number ``n``.\n\n This can be implemented either as a function or as a Tornado coroutine.\n \"\"\"\n yield [self._start_worker(**kwargs)\n for i in range(n - len(self.workers))]\n\n @gen.coroutine\n def scale_down(self, workers):\n \"\"\" Remove ``workers`` from the cluster\n\n Given a list of worker addresses this function should remove those\n workers from the cluster. This may require tracking which jobs are\n associated to which worker address.\n\n This can be implemented either as a function or as a Tornado coroutine.\n \"\"\"\n workers = set(workers)\n yield [self._stop_worker(w)\n for w in self.workers\n if w.worker_address in workers]\n while workers & set(self.workers):\n yield gen.sleep(0.01)\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n @property\n def scheduler_address(self):\n try:\n return self.scheduler.address\n except ValueError:\n return '<unstarted>'\n\n\nclusters_to_close = weakref.WeakSet()\n\n\[email protected]\ndef close_clusters():\n for cluster in clusters_to_close:\n cluster.close()\n", "path": "distributed/deploy/local.py" } ]
diff --git a/distributed/deploy/local.py b/distributed/deploy/local.py index 182a0041df1..d1458c4f824 100644 --- a/distributed/deploy/local.py +++ b/distributed/deploy/local.py @@ -203,7 +203,8 @@ def start_worker(self, ncores=0, **kwargs): @gen.coroutine def _stop_worker(self, w): yield w._close() - self.workers.remove(w) + if w in self.workers: + self.workers.remove(w) def stop_worker(self, w): """ Stop a running worker diff --git a/distributed/deploy/tests/test_local.py b/distributed/deploy/tests/test_local.py index 99a148fce8e..c1a7784cf25 100644 --- a/distributed/deploy/tests/test_local.py +++ b/distributed/deploy/tests/test_local.py @@ -15,7 +15,8 @@ from distributed.deploy.local import LocalCluster from distributed.metrics import time from distributed.utils_test import (inc, loop, raises, gen_test, pristine_loop, - assert_can_connect_locally_4, assert_can_connect_from_everywhere_4_6) + assert_can_connect_locally_4, assert_can_connect_from_everywhere_4_6, + captured_logger) from distributed.utils import ignoring, sync from distributed.worker import TOTAL_MEMORY, _ncores @@ -32,6 +33,20 @@ def test_simple(loop): assert any(w.data == {x.key: 2} for w in c.workers) +def test_close_twice(loop): + cluster = LocalCluster() + with Client(cluster.scheduler_address) as client: + f = client.map(inc, range(100)) + client.gather(f) + with captured_logger('tornado.application') as log: + cluster.close() + cluster.close() + sleep(0.5) + log = log.getvalue() + print(log) + assert not log + + @pytest.mark.skipif('sys.version_info[0] == 2', reason='multi-loop') def test_procs(loop): with LocalCluster(2, scheduler_port=0, processes=False, threads_per_worker=3, diff --git a/distributed/utils_test.py b/distributed/utils_test.py index cdcfb4b28b6..9dad79e9d52 100644 --- a/distributed/utils_test.py +++ b/distributed/utils_test.py @@ -797,6 +797,8 @@ def assert_can_connect_locally_6(port, timeout=None, connection_args=None): def captured_logger(logger): """Capture output from the given Logger. """ + if isinstance(logger, str): + logger = logging.getLogger(logger) orig_handlers = logger.handlers[:] sio = six.StringIO() logger.handlers[:] = [logging.StreamHandler(sio)]
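The essence of the patch above is making worker removal idempotent: `list.remove` raises `ValueError` when the element is already gone, so the fix checks membership first, which is what lets `close()` run more than once without noise in the logs. Below is a minimal, self-contained sketch of that pattern; the `Cluster` and `Worker` classes are illustrative stand-ins, not the real `distributed.LocalCluster` API.

```python
# Guarded-removal sketch; stand-in classes, not distributed.LocalCluster.
class Worker(object):
    def __init__(self, name):
        self.name = name


class Cluster(object):
    def __init__(self):
        self.workers = []

    def start_worker(self, name):
        w = Worker(name)
        self.workers.append(w)
        return w

    def stop_worker(self, w):
        # Guard the removal so stopping the same worker twice (for example
        # when close() is called repeatedly) no longer raises ValueError.
        if w in self.workers:
            self.workers.remove(w)

    def close(self):
        for w in list(self.workers):
            self.stop_worker(w)


cluster = Cluster()
cluster.start_worker('a')
cluster.close()
cluster.close()  # the second close is now a no-op instead of an error
assert cluster.workers == []
```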
ansible__ansible-modules-extras-3339
Marker in blockinfile is a line prefix, not a whole line ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME `blockinfile` ##### ANSIBLE VERSION ``` ansible 2.1.0 (devel 2e529d7a51) last updated 2016/03/29 20:29:18 (GMT +100) lib/ansible/modules/core: (detached HEAD 0268864211) last updated 2016/03/29 20:30:38 (GMT +100) lib/ansible/modules/extras: (detached HEAD 6978984244) last updated 2016/03/29 20:30:38 (GMT +100) config file = /etc/ansible/ansible.cfg configured module search path = Default w/o overrides ``` ##### CONFIGURATION N/A. ##### OS / ENVIRONMENT N/A. ##### SUMMARY The `blockinfile` documentation talks about `marker` being a "line template", but actually it doesn't match against a whole line, it looks for this marker at the start of the line. This causes trouble when one marker happens to be a leading substring of another marker. ##### STEPS TO REPRODUCE Run the following play twice: ``` yaml - hosts: localhost tasks: - blockinfile: dest: /tmp/example block: this is an example block to insert create: true - blockinfile: dest: /tmp/example block: this is a different block marker: '# {mark} ANSIBLE MANAGED BLOCK: non-default marker' ``` ##### EXPECTED RESULTS File `/tmp/example` has the following contents: ``` # BEGIN ANSIBLE MANAGED BLOCK this is an example block to insert # END ANSIBLE MANAGED BLOCK # BEGIN ANSIBLE MANAGED BLOCK: non-default marker this is a different block # END ANSIBLE MANAGED BLOCK: non-default marker ``` ##### ACTUAL RESULTS File `/tmp/example` has the following contents: ``` # BEGIN ANSIBLE MANAGED BLOCK this is an example block to insert # END ANSIBLE MANAGED BLOCK # BEGIN ANSIBLE MANAGED BLOCK this is an example block to insert # END ANSIBLE MANAGED BLOCK # BEGIN ANSIBLE MANAGED BLOCK: non-default marker this is a different block # END ANSIBLE MANAGED BLOCK: non-default marker ``` Here, on the second run, the first task has overwritten the second block, and the second task has reinserted the second block.
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, 2015 YAEGASHI Takeshi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: blockinfile\nauthor:\n - 'YAEGASHI Takeshi (@yaegashi)'\nextends_documentation_fragment:\n - files\n - validate\nshort_description: Insert/update/remove a text block\n surrounded by marker lines.\nversion_added: '2.0'\ndescription:\n - This module will insert/update/remove a block of multi-line text\n surrounded by customizable marker lines.\nnotes:\n - This module supports check mode.\n - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.\noptions:\n dest:\n aliases: [ name, destfile ]\n required: true\n description:\n - The file to modify.\n state:\n required: false\n choices: [ present, absent ]\n default: present\n description:\n - Whether the block should be there or not.\n marker:\n required: false\n default: '# {mark} ANSIBLE MANAGED BLOCK'\n description:\n - The marker line template.\n \"{mark}\" will be replaced with \"BEGIN\" or \"END\".\n block:\n aliases: [ content ]\n required: false\n default: ''\n description:\n - The text to insert inside the marker lines.\n If it's missing or an empty string,\n the block will be removed as if C(state) were specified to C(absent).\n insertafter:\n required: false\n default: EOF\n description:\n - If specified, the block will be inserted after the last match of\n specified regular expression. A special value is available; C(EOF) for\n inserting the block at the end of the file. If specified regular\n expresion has no matches, C(EOF) will be used instead.\n choices: [ 'EOF', '*regex*' ]\n insertbefore:\n required: false\n default: None\n description:\n - If specified, the block will be inserted before the last match of\n specified regular expression. A special value is available; C(BOF) for\n inserting the block at the beginning of the file. 
If specified regular\n expresion has no matches, the block will be inserted at the end of the\n file.\n choices: [ 'BOF', '*regex*' ]\n create:\n required: false\n default: 'no'\n choices: [ 'yes', 'no' ]\n description:\n - Create a new file if it doesn't exist.\n backup:\n required: false\n default: 'no'\n choices: [ 'yes', 'no' ]\n description:\n - Create a backup file including the timestamp information so you can\n get the original file back if you somehow clobbered it incorrectly.\n follow:\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n description:\n - 'This flag indicates that filesystem links, if they exist, should be followed.'\n version_added: \"2.1\"\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: insert/update \"Match User\" configuation block in /etc/ssh/sshd_config\n blockinfile:\n dest: /etc/ssh/sshd_config\n block: |\n Match User ansible-agent\n PasswordAuthentication no\n\n- name: insert/update eth0 configuration stanza in /etc/network/interfaces\n (it might be better to copy files into /etc/network/interfaces.d/)\n blockinfile:\n dest: /etc/network/interfaces\n block: |\n iface eth0 inet static\n address 192.0.2.23\n netmask 255.255.255.0\n\n- name: insert/update HTML surrounded by custom markers after <body> line\n blockinfile:\n dest: /var/www/html/index.html\n marker: \"<!-- {mark} ANSIBLE MANAGED BLOCK -->\"\n insertafter: \"<body>\"\n content: |\n <h1>Welcome to {{ansible_hostname}}</h1>\n <p>Last updated on {{ansible_date_time.iso8601}}</p>\n\n- name: remove HTML as well as surrounding markers\n blockinfile:\n dest: /var/www/html/index.html\n marker: \"<!-- {mark} ANSIBLE MANAGED BLOCK -->\"\n content: \"\"\n\n- name: Add mappings to /etc/hosts\n blockinfile:\n dest: /etc/hosts\n block: |\n {{item.ip}} {{item.name}}\n marker: \"# {mark} ANSIBLE MANAGED BLOCK {{item.name}}\"\n with_items:\n - { name: host1, ip: 10.10.1.10 }\n - { name: host2, ip: 10.10.1.11 }\n - { name: host3, ip: 10.10.1.12 }\n\"\"\"\n\nimport re\nimport os\nimport tempfile\nfrom ansible.module_utils.six import b\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_bytes\n\ndef write_changes(module, contents, dest):\n\n tmpfd, tmpfile = tempfile.mkstemp()\n f = os.fdopen(tmpfd, 'wb')\n f.write(contents)\n f.close()\n\n validate = module.params.get('validate', None)\n valid = not validate\n if validate:\n if \"%s\" not in validate:\n module.fail_json(msg=\"validate must contain %%s: %s\" % (validate))\n (rc, out, err) = module.run_command(validate % tmpfile)\n valid = rc == 0\n if rc != 0:\n module.fail_json(msg='failed to validate: '\n 'rc:%s error:%s' % (rc, err))\n if valid:\n module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes'])\n\n\ndef check_file_attrs(module, changed, message):\n\n file_args = module.load_file_common_arguments(module.params)\n if module.set_file_attributes_if_different(file_args, False):\n\n if changed:\n message += \" and \"\n changed = True\n message += \"ownership, perms or SE linux context changed\"\n\n return message, changed\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n dest=dict(required=True, aliases=['name', 'destfile'], type='path'),\n state=dict(default='present', choices=['absent', 'present']),\n marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),\n block=dict(default='', type='str', aliases=['content']),\n insertafter=dict(default=None),\n insertbefore=dict(default=None),\n create=dict(default=False, type='bool'),\n backup=dict(default=False, 
type='bool'),\n validate=dict(default=None, type='str'),\n ),\n mutually_exclusive=[['insertbefore', 'insertafter']],\n add_file_common_args=True,\n supports_check_mode=True\n )\n\n params = module.params\n dest = params['dest']\n if module.boolean(params.get('follow', None)):\n dest = os.path.realpath(dest)\n\n if os.path.isdir(dest):\n module.fail_json(rc=256,\n msg='Destination %s is a directory !' % dest)\n\n path_exists = os.path.exists(dest)\n if not path_exists:\n if not module.boolean(params['create']):\n module.fail_json(rc=257,\n msg='Destination %s does not exist !' % dest)\n original = None\n lines = []\n else:\n f = open(dest, 'rb')\n original = f.read()\n f.close()\n lines = original.splitlines()\n\n insertbefore = params['insertbefore']\n insertafter = params['insertafter']\n block = to_bytes(params['block'])\n marker = to_bytes(params['marker'])\n present = params['state'] == 'present'\n\n if not present and not path_exists:\n module.exit_json(changed=False, msg=\"File not present\")\n\n if insertbefore is None and insertafter is None:\n insertafter = 'EOF'\n\n if insertafter not in (None, 'EOF'):\n insertre = re.compile(insertafter)\n elif insertbefore not in (None, 'BOF'):\n insertre = re.compile(insertbefore)\n else:\n insertre = None\n\n marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)\n marker1 = re.sub(b(r'{mark}'), b('END'), marker)\n if present and block:\n # Escape seqeuences like '\\n' need to be handled in Ansible 1.x\n if module.ansible_version.startswith('1.'):\n block = re.sub('', block, '')\n blocklines = [marker0] + block.splitlines() + [marker1]\n else:\n blocklines = []\n\n n0 = n1 = None\n for i, line in enumerate(lines):\n if line.startswith(marker0):\n n0 = i\n if line.startswith(marker1):\n n1 = i\n\n if None in (n0, n1):\n n0 = None\n if insertre is not None:\n for i, line in enumerate(lines):\n if insertre.search(line):\n n0 = i\n if n0 is None:\n n0 = len(lines)\n elif insertafter is not None:\n n0 += 1\n elif insertbefore is not None:\n n0 = 0 # insertbefore=BOF\n else:\n n0 = len(lines) # insertafter=EOF\n elif n0 < n1:\n lines[n0:n1+1] = []\n else:\n lines[n1:n0+1] = []\n n0 = n1\n\n lines[n0:n0] = blocklines\n\n if lines:\n result = b('\\n').join(lines)\n if original is None or original.endswith(b('\\n')):\n result += b('\\n')\n else:\n result = ''\n if original == result:\n msg = ''\n changed = False\n elif original is None:\n msg = 'File created'\n changed = True\n elif not blocklines:\n msg = 'Block removed'\n changed = True\n else:\n msg = 'Block inserted'\n changed = True\n\n if changed and not module.check_mode:\n if module.boolean(params['backup']) and path_exists:\n module.backup_local(dest)\n write_changes(module, result, dest)\n\n if module.check_mode and not path_exists:\n module.exit_json(changed=changed, msg=msg)\n\n msg, changed = check_file_attrs(module, changed, msg)\n module.exit_json(changed=changed, msg=msg)\n\n\nif __name__ == '__main__':\n main()\n", "path": "files/blockinfile.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, 2015 YAEGASHI Takeshi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = \"\"\"\n---\nmodule: blockinfile\nauthor:\n - 'YAEGASHI Takeshi (@yaegashi)'\nextends_documentation_fragment:\n - files\n - validate\nshort_description: Insert/update/remove a text block\n surrounded by marker lines.\nversion_added: '2.0'\ndescription:\n - This module will insert/update/remove a block of multi-line text\n surrounded by customizable marker lines.\nnotes:\n - This module supports check mode.\n - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.\noptions:\n dest:\n aliases: [ name, destfile ]\n required: true\n description:\n - The file to modify.\n state:\n required: false\n choices: [ present, absent ]\n default: present\n description:\n - Whether the block should be there or not.\n marker:\n required: false\n default: '# {mark} ANSIBLE MANAGED BLOCK'\n description:\n - The marker line template.\n \"{mark}\" will be replaced with \"BEGIN\" or \"END\".\n block:\n aliases: [ content ]\n required: false\n default: ''\n description:\n - The text to insert inside the marker lines.\n If it's missing or an empty string,\n the block will be removed as if C(state) were specified to C(absent).\n insertafter:\n required: false\n default: EOF\n description:\n - If specified, the block will be inserted after the last match of\n specified regular expression. A special value is available; C(EOF) for\n inserting the block at the end of the file. If specified regular\n expresion has no matches, C(EOF) will be used instead.\n choices: [ 'EOF', '*regex*' ]\n insertbefore:\n required: false\n default: None\n description:\n - If specified, the block will be inserted before the last match of\n specified regular expression. A special value is available; C(BOF) for\n inserting the block at the beginning of the file. 
If specified regular\n expresion has no matches, the block will be inserted at the end of the\n file.\n choices: [ 'BOF', '*regex*' ]\n create:\n required: false\n default: 'no'\n choices: [ 'yes', 'no' ]\n description:\n - Create a new file if it doesn't exist.\n backup:\n required: false\n default: 'no'\n choices: [ 'yes', 'no' ]\n description:\n - Create a backup file including the timestamp information so you can\n get the original file back if you somehow clobbered it incorrectly.\n follow:\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n description:\n - 'This flag indicates that filesystem links, if they exist, should be followed.'\n version_added: \"2.1\"\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: insert/update \"Match User\" configuation block in /etc/ssh/sshd_config\n blockinfile:\n dest: /etc/ssh/sshd_config\n block: |\n Match User ansible-agent\n PasswordAuthentication no\n\n- name: insert/update eth0 configuration stanza in /etc/network/interfaces\n (it might be better to copy files into /etc/network/interfaces.d/)\n blockinfile:\n dest: /etc/network/interfaces\n block: |\n iface eth0 inet static\n address 192.0.2.23\n netmask 255.255.255.0\n\n- name: insert/update HTML surrounded by custom markers after <body> line\n blockinfile:\n dest: /var/www/html/index.html\n marker: \"<!-- {mark} ANSIBLE MANAGED BLOCK -->\"\n insertafter: \"<body>\"\n content: |\n <h1>Welcome to {{ansible_hostname}}</h1>\n <p>Last updated on {{ansible_date_time.iso8601}}</p>\n\n- name: remove HTML as well as surrounding markers\n blockinfile:\n dest: /var/www/html/index.html\n marker: \"<!-- {mark} ANSIBLE MANAGED BLOCK -->\"\n content: \"\"\n\n- name: Add mappings to /etc/hosts\n blockinfile:\n dest: /etc/hosts\n block: |\n {{item.ip}} {{item.name}}\n marker: \"# {mark} ANSIBLE MANAGED BLOCK {{item.name}}\"\n with_items:\n - { name: host1, ip: 10.10.1.10 }\n - { name: host2, ip: 10.10.1.11 }\n - { name: host3, ip: 10.10.1.12 }\n\"\"\"\n\nimport re\nimport os\nimport tempfile\nfrom ansible.module_utils.six import b\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_bytes\n\ndef write_changes(module, contents, dest):\n\n tmpfd, tmpfile = tempfile.mkstemp()\n f = os.fdopen(tmpfd, 'wb')\n f.write(contents)\n f.close()\n\n validate = module.params.get('validate', None)\n valid = not validate\n if validate:\n if \"%s\" not in validate:\n module.fail_json(msg=\"validate must contain %%s: %s\" % (validate))\n (rc, out, err) = module.run_command(validate % tmpfile)\n valid = rc == 0\n if rc != 0:\n module.fail_json(msg='failed to validate: '\n 'rc:%s error:%s' % (rc, err))\n if valid:\n module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes'])\n\n\ndef check_file_attrs(module, changed, message):\n\n file_args = module.load_file_common_arguments(module.params)\n if module.set_file_attributes_if_different(file_args, False):\n\n if changed:\n message += \" and \"\n changed = True\n message += \"ownership, perms or SE linux context changed\"\n\n return message, changed\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n dest=dict(required=True, aliases=['name', 'destfile'], type='path'),\n state=dict(default='present', choices=['absent', 'present']),\n marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),\n block=dict(default='', type='str', aliases=['content']),\n insertafter=dict(default=None),\n insertbefore=dict(default=None),\n create=dict(default=False, type='bool'),\n backup=dict(default=False, 
type='bool'),\n validate=dict(default=None, type='str'),\n ),\n mutually_exclusive=[['insertbefore', 'insertafter']],\n add_file_common_args=True,\n supports_check_mode=True\n )\n\n params = module.params\n dest = params['dest']\n if module.boolean(params.get('follow', None)):\n dest = os.path.realpath(dest)\n\n if os.path.isdir(dest):\n module.fail_json(rc=256,\n msg='Destination %s is a directory !' % dest)\n\n path_exists = os.path.exists(dest)\n if not path_exists:\n if not module.boolean(params['create']):\n module.fail_json(rc=257,\n msg='Destination %s does not exist !' % dest)\n original = None\n lines = []\n else:\n f = open(dest, 'rb')\n original = f.read()\n f.close()\n lines = original.splitlines()\n\n insertbefore = params['insertbefore']\n insertafter = params['insertafter']\n block = to_bytes(params['block'])\n marker = to_bytes(params['marker'])\n present = params['state'] == 'present'\n\n if not present and not path_exists:\n module.exit_json(changed=False, msg=\"File not present\")\n\n if insertbefore is None and insertafter is None:\n insertafter = 'EOF'\n\n if insertafter not in (None, 'EOF'):\n insertre = re.compile(insertafter)\n elif insertbefore not in (None, 'BOF'):\n insertre = re.compile(insertbefore)\n else:\n insertre = None\n\n marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)\n marker1 = re.sub(b(r'{mark}'), b('END'), marker)\n if present and block:\n # Escape seqeuences like '\\n' need to be handled in Ansible 1.x\n if module.ansible_version.startswith('1.'):\n block = re.sub('', block, '')\n blocklines = [marker0] + block.splitlines() + [marker1]\n else:\n blocklines = []\n\n n0 = n1 = None\n for i, line in enumerate(lines):\n if line == marker0:\n n0 = i\n if line == marker1:\n n1 = i\n\n if None in (n0, n1):\n n0 = None\n if insertre is not None:\n for i, line in enumerate(lines):\n if insertre.search(line):\n n0 = i\n if n0 is None:\n n0 = len(lines)\n elif insertafter is not None:\n n0 += 1\n elif insertbefore is not None:\n n0 = 0 # insertbefore=BOF\n else:\n n0 = len(lines) # insertafter=EOF\n elif n0 < n1:\n lines[n0:n1+1] = []\n else:\n lines[n1:n0+1] = []\n n0 = n1\n\n lines[n0:n0] = blocklines\n\n if lines:\n result = b('\\n').join(lines)\n if original is None or original.endswith(b('\\n')):\n result += b('\\n')\n else:\n result = ''\n if original == result:\n msg = ''\n changed = False\n elif original is None:\n msg = 'File created'\n changed = True\n elif not blocklines:\n msg = 'Block removed'\n changed = True\n else:\n msg = 'Block inserted'\n changed = True\n\n if changed and not module.check_mode:\n if module.boolean(params['backup']) and path_exists:\n module.backup_local(dest)\n write_changes(module, result, dest)\n\n if module.check_mode and not path_exists:\n module.exit_json(changed=changed, msg=msg)\n\n msg, changed = check_file_attrs(module, changed, msg)\n module.exit_json(changed=changed, msg=msg)\n\n\nif __name__ == '__main__':\n main()\n", "path": "files/blockinfile.py" } ]
diff --git a/files/blockinfile.py b/files/blockinfile.py index 96f430cf14a..ecee4800117 100755 --- a/files/blockinfile.py +++ b/files/blockinfile.py @@ -258,9 +258,9 @@ def main(): n0 = n1 = None for i, line in enumerate(lines): - if line.startswith(marker0): + if line == marker0: n0 = i - if line.startswith(marker1): + if line == marker1: n1 = i if None in (n0, n1):
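The one-line change above replaces prefix matching with exact comparison when locating existing marker lines. A short, self-contained sketch (plain Python, not the Ansible module itself) of why that matters for the reproduction in the report, where the custom marker begins with the default marker text:

```python
# File contents after the first run of the reproduction playbook.
lines = [
    "# BEGIN ANSIBLE MANAGED BLOCK",
    "this is an example block to insert",
    "# END ANSIBLE MANAGED BLOCK",
    "# BEGIN ANSIBLE MANAGED BLOCK: non-default marker",
    "this is a different block",
    "# END ANSIBLE MANAGED BLOCK: non-default marker",
]

marker0 = "# BEGIN ANSIBLE MANAGED BLOCK"  # default BEGIN marker
marker1 = "# END ANSIBLE MANAGED BLOCK"    # default END marker

# Old behaviour: startswith() also matches the custom-marker lines, so the
# default-marker task takes the last matches (indices 3 and 5) as its block
# boundaries and clobbers the other block on the next run.
prefix_begin = [i for i, l in enumerate(lines) if l.startswith(marker0)]
prefix_end = [i for i, l in enumerate(lines) if l.startswith(marker1)]
assert (prefix_begin, prefix_end) == ([0, 3], [2, 5])

# New behaviour: exact equality only matches the default marker's own lines.
exact_begin = [i for i, l in enumerate(lines) if l == marker0]
exact_end = [i for i, l in enumerate(lines) if l == marker1]
assert (exact_begin, exact_end) == ([0], [2])
```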
pydantic__pydantic-738
duplicated errors when validators raise ValidationError # Bug As a work around for #619 I tried the following ```py from pydantic import VERSION, BaseModel, Union, validator from typing_extensions import Literal print('pydantic version:', VERSION) class Foo(BaseModel): model_type: Literal['foo'] f: int class Bar(BaseModel): model_type: Literal['bar'] b: int class MyModel(BaseModel): foobar: Union[Foo, Bar] @validator('foobar', pre=True) def check_action(cls, v): if isinstance(v, dict): model_type = v.get('model_type') if model_type == 'foo': return Foo(**v) if model_type == 'var': return Bar(**v) return v MyModel(foobar={'model_type': 'foo', 'f': 'x'}) ``` Output: ``` pydantic version: 0.32.1 Traceback (most recent call last): File "test.py", line 31, in <module> MyModel(foobar={'model_type': 'foo', 'f': 'x'}) File "pydantic/main.py", line 275, in pydantic.main.BaseModel.__init__ File "pydantic/main.py", line 785, in pydantic.main.validate_model pydantic.error_wrappers.ValidationError: 2 validation errors for MyModel foobar -> f value is not a valid integer (type=type_error.integer) foobar -> f value is not a valid integer (type=type_error.integer) ``` When validators raise `ValidationError` the errors are duplicated. Won't be that common, but should be fixed. Repeated error when validator raises an exception # Bug Please complete: * OS: **Ubuntu** * Python version `import sys; print(sys.version)`: **3.7.4** * Pydantic version `import pydantic; print(pydantic.VERSION)`: **v0.32.1** ```py from typing import Optional from pydantic import BaseModel, validator class Foobar(BaseModel): foo: Optional[str] = None @validator('foo', always=True) def check_foo(cls, v): if not v: raise ValueError('custom error, foo is required') return v print(Foobar(foo='x')) print(Foobar()) ``` Outputs: ``` pydantic.error_wrappers.ValidationError: 2 validation errors for Foobar foo none is not an allowed value (type=type_error.none.not_allowed) foo custom error, foo is required (type=value_error) ``` If i add `pre=True`, the error is even weirder: ``` pydantic.error_wrappers.ValidationError: 2 validation errors for Foobar foo custom error, foo is required (type=value_error) foo custom error, foo is required (type=value_error) ```
[ { "content": "import json\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n from pydantic import BaseConfig # noqa: F401\n\n__all__ = ('ErrorWrapper', 'ValidationError')\n\n\nclass ErrorWrapper:\n __slots__ = 'exc', 'type_', 'loc', 'msg_template'\n\n def __init__(\n self, exc: Exception, *, loc: Union[Tuple[str, ...], str], config: Optional[Type['BaseConfig']] = None\n ) -> None:\n self.exc = exc\n self.type_ = get_exc_type(type(exc))\n self.loc: Tuple[str, ...] = loc if isinstance(loc, tuple) else (loc,) # type: ignore\n self.msg_template = config.error_msg_templates.get(self.type_) if config else None\n\n @property\n def ctx(self) -> Dict[str, Any]:\n return getattr(self.exc, 'ctx', None)\n\n @property\n def msg(self) -> str:\n default_msg_template = getattr(self.exc, 'msg_template', None)\n msg_template = self.msg_template or default_msg_template\n if msg_template:\n return msg_template.format(**self.ctx or {})\n\n return str(self.exc)\n\n def dict(self, *, loc_prefix: Optional[Tuple[str, ...]] = None) -> Dict[str, Any]:\n loc = self.loc if loc_prefix is None else loc_prefix + self.loc\n\n d: Dict[str, Any] = {'loc': loc, 'msg': self.msg, 'type': self.type_}\n\n if self.ctx is not None:\n d['ctx'] = self.ctx\n\n return d\n\n\n# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]\n# but recursive, therefore just use:\nErrorList = Union[Sequence[Any], ErrorWrapper]\n\n\nclass ValidationError(ValueError):\n __slots__ = ('raw_errors', 'model')\n\n def __init__(self, errors: Sequence[ErrorList], model: Type[Any]) -> None:\n self.raw_errors = errors\n self.model = model\n\n @lru_cache()\n def errors(self) -> List[Dict[str, Any]]:\n return list(flatten_errors(self.raw_errors))\n\n def json(self, *, indent: Union[None, int, str] = 2) -> str:\n return json.dumps(self.errors(), indent=indent)\n\n def __str__(self) -> str:\n errors = self.errors()\n no_errors = len(errors)\n return (\n f'{no_errors} validation error{\"\" if no_errors == 1 else \"s\"} for {self.model.__name__}\\n'\n f'{display_errors(errors)}'\n )\n\n\ndef display_errors(errors: List[Dict[str, Any]]) -> str:\n return '\\n'.join(f'{_display_error_loc(e)}\\n {e[\"msg\"]} ({_display_error_type_and_ctx(e)})' for e in errors)\n\n\ndef _display_error_loc(error: Dict[str, Any]) -> str:\n return ' -> '.join(str(l) for l in error['loc'])\n\n\ndef _display_error_type_and_ctx(error: Dict[str, Any]) -> str:\n t = 'type=' + error['type']\n ctx = error.get('ctx')\n if ctx:\n return t + ''.join(f'; {k}={v}' for k, v in ctx.items())\n else:\n return t\n\n\ndef flatten_errors(\n errors: Sequence[Any], *, loc: Optional[Tuple[str, ...]] = None\n) -> Generator[Dict[str, Any], None, None]:\n for error in errors:\n if isinstance(error, ErrorWrapper):\n if isinstance(error.exc, ValidationError):\n if loc is not None:\n error_loc = loc + error.loc\n else:\n error_loc = error.loc\n yield from flatten_errors(error.exc.raw_errors, loc=error_loc)\n else:\n yield error.dict(loc_prefix=loc)\n elif isinstance(error, list):\n yield from flatten_errors(error)\n else:\n raise RuntimeError(f'Unknown error object: {error}')\n\n\n@lru_cache()\ndef get_exc_type(cls: Type[Exception]) -> str:\n\n base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'\n if cls in (TypeError, ValueError):\n # just TypeError or ValueError, no extra code\n return base_name\n\n # if it's not a TypeError 
or ValueError, we just take the lowercase of the exception name\n # no chaining or snake case logic, use \"code\" for more complex error types.\n code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()\n return base_name + '.' + code\n", "path": "pydantic/error_wrappers.py" } ]
[ { "content": "import json\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n from pydantic import BaseConfig # noqa: F401\n\n__all__ = ('ErrorWrapper', 'ValidationError')\n\n\nclass ErrorWrapper:\n __slots__ = 'exc', 'type_', 'loc', 'msg_template'\n\n def __init__(\n self, exc: Exception, *, loc: Union[Tuple[str, ...], str], config: Optional[Type['BaseConfig']] = None\n ) -> None:\n self.exc = exc\n self.type_ = get_exc_type(type(exc))\n self.loc: Tuple[str, ...] = loc if isinstance(loc, tuple) else (loc,) # type: ignore\n self.msg_template = config.error_msg_templates.get(self.type_) if config else None\n\n @property\n def ctx(self) -> Dict[str, Any]:\n return getattr(self.exc, 'ctx', None)\n\n @property\n def msg(self) -> str:\n default_msg_template = getattr(self.exc, 'msg_template', None)\n msg_template = self.msg_template or default_msg_template\n if msg_template:\n return msg_template.format(**self.ctx or {})\n\n return str(self.exc)\n\n def dict(self, *, loc_prefix: Optional[Tuple[str, ...]] = None) -> Dict[str, Any]:\n loc = self.loc if loc_prefix is None else loc_prefix + self.loc\n\n d: Dict[str, Any] = {'loc': loc, 'msg': self.msg, 'type': self.type_}\n\n if self.ctx is not None:\n d['ctx'] = self.ctx\n\n return d\n\n def __repr__(self) -> str:\n return f'<ErrorWrapper {self.dict()}>'\n\n\n# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]\n# but recursive, therefore just use:\nErrorList = Union[Sequence[Any], ErrorWrapper]\n\n\nclass ValidationError(ValueError):\n __slots__ = ('raw_errors', 'model')\n\n def __init__(self, errors: Sequence[ErrorList], model: Type[Any]) -> None:\n self.raw_errors = errors\n self.model = model\n\n @lru_cache()\n def errors(self) -> List[Dict[str, Any]]:\n return list(flatten_errors(self.raw_errors))\n\n def json(self, *, indent: Union[None, int, str] = 2) -> str:\n return json.dumps(self.errors(), indent=indent)\n\n def __str__(self) -> str:\n errors = self.errors()\n no_errors = len(errors)\n return (\n f'{no_errors} validation error{\"\" if no_errors == 1 else \"s\"} for {self.model.__name__}\\n'\n f'{display_errors(errors)}'\n )\n\n\ndef display_errors(errors: List[Dict[str, Any]]) -> str:\n return '\\n'.join(f'{_display_error_loc(e)}\\n {e[\"msg\"]} ({_display_error_type_and_ctx(e)})' for e in errors)\n\n\ndef _display_error_loc(error: Dict[str, Any]) -> str:\n return ' -> '.join(str(l) for l in error['loc'])\n\n\ndef _display_error_type_and_ctx(error: Dict[str, Any]) -> str:\n t = 'type=' + error['type']\n ctx = error.get('ctx')\n if ctx:\n return t + ''.join(f'; {k}={v}' for k, v in ctx.items())\n else:\n return t\n\n\ndef flatten_errors(\n errors: Sequence[Any], *, loc: Optional[Tuple[str, ...]] = None\n) -> Generator[Dict[str, Any], None, None]:\n for error in errors:\n if isinstance(error, ErrorWrapper):\n if isinstance(error.exc, ValidationError):\n if loc is not None:\n error_loc = loc + error.loc\n else:\n error_loc = error.loc\n yield from flatten_errors(error.exc.raw_errors, loc=error_loc)\n else:\n yield error.dict(loc_prefix=loc)\n elif isinstance(error, list):\n yield from flatten_errors(error)\n else:\n raise RuntimeError(f'Unknown error object: {error}')\n\n\n@lru_cache()\ndef get_exc_type(cls: Type[Exception]) -> str:\n\n base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'\n if cls in (TypeError, ValueError):\n # just TypeError or 
ValueError, no extra code\n return base_name\n\n # if it's not a TypeError or ValueError, we just take the lowercase of the exception name\n # no chaining or snake case logic, use \"code\" for more complex error types.\n code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()\n return base_name + '.' + code\n", "path": "pydantic/error_wrappers.py" } ]
diff --git a/changes/738-samuelcolvin.rst b/changes/738-samuelcolvin.rst new file mode 100644 index 00000000000..af8154f43a7 --- /dev/null +++ b/changes/738-samuelcolvin.rst @@ -0,0 +1 @@ +add ``__repr__`` method to ``ErrorWrapper`` diff --git a/pydantic/error_wrappers.py b/pydantic/error_wrappers.py index f483fa6e605..3b83b67cdf6 100644 --- a/pydantic/error_wrappers.py +++ b/pydantic/error_wrappers.py @@ -42,6 +42,9 @@ def dict(self, *, loc_prefix: Optional[Tuple[str, ...]] = None) -> Dict[str, Any return d + def __repr__(self) -> str: + return f'<ErrorWrapper {self.dict()}>' + # ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper] # but recursive, therefore just use: diff --git a/tests/test_errors.py b/tests/test_errors.py index bf3a68b1717..6bf7ef4fe3c 100644 --- a/tests/test_errors.py +++ b/tests/test_errors.py @@ -1,6 +1,13 @@ +from typing import Optional, Union + import pytest -from pydantic import PydanticTypeError +from pydantic import BaseModel, PydanticTypeError, ValidationError, validator + +try: + from typing_extensions import Literal +except ImportError: + Literal = None def test_pydantic_error(): @@ -14,3 +21,61 @@ def __init__(self, *, test_ctx: int) -> None: with pytest.raises(TestError) as exc_info: raise TestError(test_ctx='test_value') assert str(exc_info.value) == 'test message template "test_value"' + + [email protected](not Literal, reason='typing_extensions not installed') +def test_interval_validation_error(): + class Foo(BaseModel): + model_type: Literal['foo'] + f: int + + class Bar(BaseModel): + model_type: Literal['bar'] + b: int + + class MyModel(BaseModel): + foobar: Union[Foo, Bar] + + @validator('foobar', pre=True, whole=True) + def check_action(cls, v): + if isinstance(v, dict): + model_type = v.get('model_type') + if model_type == 'foo': + return Foo(**v) + if model_type == 'bar': + return Bar(**v) + raise ValueError('not valid Foo or Bar') + + m1 = MyModel(foobar={'model_type': 'foo', 'f': '1'}) + assert m1.foobar.f == 1 + assert isinstance(m1.foobar, Foo) + + m2 = MyModel(foobar={'model_type': 'bar', 'b': '2'}) + assert m2.foobar.b == 2 + assert isinstance(m2.foobar, BaseModel) + + with pytest.raises(ValidationError) as exc_info: + MyModel(foobar={'model_type': 'foo', 'f': 'x'}) + assert exc_info.value.errors() == [ + {'loc': ('foobar', 'f'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'} + ] + + +def test_error_on_optional(): + class Foobar(BaseModel): + foo: Optional[str] = None + + @validator('foo', always=True, whole=True) + def check_foo(cls, v): + raise ValueError('custom error') + + with pytest.raises(ValidationError) as exc_info: + Foobar(foo='x') + assert exc_info.value.errors() == [{'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}] + assert repr(exc_info.value.raw_errors[0]) == ( + "<ErrorWrapper {'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}>" + ) + + with pytest.raises(ValidationError) as exc_info: + Foobar(foo=None) + assert exc_info.value.errors() == [{'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}]
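Besides the regression tests for the duplicated-error reports, the patch adds a `__repr__` to `ErrorWrapper`. The snippet below is a tiny stand-in (no pydantic import, so it runs anywhere) that only mirrors the repr format the new `test_error_on_optional` asserts; it does not reproduce the validator machinery or the `whole=True` behaviour exercised in the tests.

```python
# Illustrative stand-in for ErrorWrapper, only to show the new repr shape.
class ErrorWrapperSketch:
    def __init__(self, loc, msg, type_):
        self.loc, self.msg, self.type_ = loc, msg, type_

    def dict(self):
        return {'loc': self.loc, 'msg': self.msg, 'type': self.type_}

    def __repr__(self):
        # same format string as the __repr__ added in the diff
        return f'<ErrorWrapper {self.dict()}>'


e = ErrorWrapperSketch(('foo',), 'custom error', 'value_error')
assert repr(e) == "<ErrorWrapper {'loc': ('foo',), 'msg': 'custom error', 'type': 'value_error'}>"
```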
DataBiosphere__toil-1535
NonCachingFileStore doesn't have the jobID attribute. This makes NonCachingFileStore incompatible with dockerCall.
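For context, `dockerCall` fails here because its container-naming helper reads `job.fileStore.jobID` (see `_getContainerName` in the module source below). The following is a hedged, self-contained illustration of that failure mode; the mock classes are for demonstration only and are not Toil's real `Job` or file store classes.

```python
# Mock objects illustrating the reported AttributeError; not Toil's classes.
class NonCachingFileStoreMock:
    pass  # deliberately missing the jobID attribute, mirroring the report


class JobMock:
    fileStore = NonCachingFileStoreMock()


def container_name(job):
    # same attribute-access pattern as toil.lib.docker._getContainerName
    return '--'.join([str(job), job.fileStore.jobID])


try:
    container_name(JobMock())
except AttributeError as err:
    print(err)  # e.g. 'NonCachingFileStoreMock' object has no attribute 'jobID'
```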
[ { "content": "\"\"\"\n Module for calling Docker. Assumes `docker` is on the PATH.\n\n Contains two user-facing functions: dockerCall and dockerCheckOutput\n\n Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure\n\n Example of using dockerCall in a Toil pipeline to index a FASTA file with SAMtools:\n def toil_job(job):\n work_dir = job.fileStore.getLocalTempDir()\n path = job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta')\n parameters = ['faidx', path]\n dockerCall(job, tool='quay.io/ucgc_cgl/samtools:latest', work_dir=work_dir, parameters=parameters)\n\"\"\"\nimport base64\nimport logging\nimport subprocess\nimport pipes\nimport os\nfrom bd2k.util.exceptions import require\n\n_logger = logging.getLogger(__name__)\n\n\ndef dockerCall(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n defer=None):\n \"\"\"\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n \"\"\"\n _docker(job, tool=tool, parameters=parameters, workDir=workDir, dockerParameters=dockerParameters,\n outfile=outfile, checkOutput=False, defer=defer)\n\n\ndef dockerCheckOutput(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n defer=None):\n \"\"\"\n Returns the stdout from the Docker invocation (via subprocess.check_output)\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. 
Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n :returns: Stdout from the docker call\n :rtype: str\n \"\"\"\n return _docker(job, tool=tool, parameters=parameters, workDir=workDir,\n dockerParameters=dockerParameters, checkOutput=True, defer=defer)\n\n\ndef _docker(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n checkOutput=False,\n defer=None):\n \"\"\"\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param bool checkOutput: When True, this function returns docker's output.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. 
This is the default behavior if defer is set to None.\n \"\"\"\n if parameters is None:\n parameters = []\n if workDir is None:\n workDir = os.getcwd()\n\n # Setup the outgoing subprocess call for docker\n baseDockerCall = ['docker', 'run']\n if dockerParameters:\n baseDockerCall += dockerParameters\n else:\n baseDockerCall += ['--rm', '--log-driver', 'none', '-v',\n os.path.abspath(workDir) + ':/data']\n\n # Ensure the user has passed a valid value for defer\n require(defer in (None, FORGO, STOP, RM),\n 'Please provide a valid value for defer.')\n\n # Get container name which is needed for _dockerKill\n try:\n if any('--name' in x for x in baseDockerCall):\n if any('--name=' in x for x in baseDockerCall):\n containerName = [x.split('=')[1] for x in baseDockerCall if '--name' in x][0]\n else:\n containerName = baseDockerCall[baseDockerCall.index('--name') + 1]\n else:\n containerName = _getContainerName(job)\n except ValueError:\n containerName = _getContainerName(job)\n baseDockerCall.extend(['--name', containerName])\n except IndexError:\n raise RuntimeError(\"Couldn't parse Docker's `--name=` option, check parameters: \" + str(dockerParameters))\n\n # Defer the container on-exit action\n if '--rm' in baseDockerCall and defer is None:\n defer = RM\n if '--rm' in baseDockerCall and defer is not RM:\n _logger.warn('--rm being passed to docker call but defer not set to dockerCall.RM, defer set to: ' + str(defer))\n job.defer(_dockerKill, containerName, action=defer)\n # Defer the permission fixing function which will run after this job concludes.\n # We call this explicitly later on in this function, but we defer it as well to handle unexpected job failure.\n job.defer(_fixPermissions, tool, workDir)\n\n # Make subprocess call\n\n # If parameters is list of lists, treat each list as separate command and chain with pipes\n if len(parameters) > 0 and type(parameters[0]) is list:\n # When piping, all arguments now get merged into a single string to bash -c.\n # We try to support spaces in paths by wrapping them all in quotes first.\n chain_params = [' '.join(p) for p in [map(pipes.quote, q) for q in parameters]]\n call = baseDockerCall + ['--entrypoint', '/bin/bash', tool, '-c', ' | '.join(chain_params)]\n else:\n call = baseDockerCall + [tool] + parameters\n _logger.info(\"Calling docker with \" + repr(call))\n\n if outfile:\n subprocess.check_call(call, stdout=outfile)\n else:\n if checkOutput:\n return subprocess.check_output(call)\n else:\n subprocess.check_call(call)\n\n\nFORGO = 0\nSTOP = 1\nRM = 2\n\n\ndef _dockerKill(containerName, action):\n \"\"\"\n Kills the specified container.\n :param str containerName: The name of the container created by docker_call\n :param int action: What action should be taken on the container? See `defer=` in\n :func:`docker_call`\n \"\"\"\n running = _containerIsRunning(containerName)\n if running is None:\n # This means that the container doesn't exist. We will see this if the container was run\n # with --rm and has already exited before this call.\n _logger.info('The container with name \"%s\" appears to have already been removed. Nothing to '\n 'do.', containerName)\n else:\n if action in (None, FORGO):\n _logger.info('The container with name %s continues to exist as we were asked to forgo a '\n 'post-job action on it.', containerName)\n else:\n _logger.info('The container with name %s exists. 
Running user-specified defer functions.',\n containerName)\n if running and action >= STOP:\n _logger.info('Stopping container \"%s\".', containerName)\n subprocess.check_call(['docker', 'stop', containerName])\n else:\n _logger.info('The container \"%s\" was not found to be running.', containerName)\n if action >= RM:\n # If the container was run with --rm, then stop will most likely remove the\n # container. We first check if it is running then remove it.\n running = _containerIsRunning(containerName)\n if running is not None:\n _logger.info('Removing container \"%s\".', containerName)\n subprocess.check_call(['docker', 'rm', '-f', containerName])\n else:\n _logger.info('The container \"%s\" was not found on the system. Nothing to remove.',\n containerName)\n\n\ndef _fixPermissions(tool, workDir):\n \"\"\"\n Fix permission of a mounted Docker directory by reusing the tool to change ownership.\n Docker natively runs as a root inside the container, and files written to the\n mounted directory are implicitly owned by root.\n\n :param list baseDockerCall: Docker run parameters\n :param str tool: Name of tool\n :param str workDir: Path of work directory to recursively chown\n \"\"\"\n baseDockerCall = ['docker', 'run', '--log-driver=none',\n '-v', os.path.abspath(workDir) + ':/data', '--rm', '--entrypoint=chown']\n stat = os.stat(workDir)\n command = baseDockerCall + [tool] + ['-R', '{}:{}'.format(stat.st_uid, stat.st_gid), '/data']\n subprocess.check_call(command)\n\n\ndef _getContainerName(job):\n return '--'.join([str(job),\n job.fileStore.jobID,\n base64.b64encode(os.urandom(9), '-_')]).replace(\"'\", '').replace('_', '')\n\n\ndef _containerIsRunning(container_name):\n \"\"\"\n Checks whether the container is running or not.\n :param container_name: Name of the container being checked.\n :returns: True if running, False if not running, None if the container doesn't exist.\n :rtype: bool\n \"\"\"\n try:\n output = subprocess.check_output(['docker', 'inspect', '--format', '{{.State.Running}}',\n container_name]).strip()\n except subprocess.CalledProcessError:\n # This will be raised if the container didn't exist.\n _logger.debug(\"'docker inspect' failed. Assuming container %s doesn't exist.\", container_name,\n exc_info=True)\n return None\n if output == 'true':\n return True\n elif output == 'false':\n return False\n else:\n raise RuntimeError(\"Got unexpected value for State.Running (%s)\" % output)\n", "path": "src/toil/lib/docker.py" } ]
[ { "content": "\"\"\"\n Module for calling Docker. Assumes `docker` is on the PATH.\n\n Contains two user-facing functions: dockerCall and dockerCheckOutput\n\n Uses Toil's defer functionality to ensure containers are shutdown even in case of job or pipeline failure\n\n Example of using dockerCall in a Toil pipeline to index a FASTA file with SAMtools:\n def toil_job(job):\n work_dir = job.fileStore.getLocalTempDir()\n path = job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta')\n parameters = ['faidx', path]\n dockerCall(job, tool='quay.io/ucgc_cgl/samtools:latest', work_dir=work_dir, parameters=parameters)\n\"\"\"\nimport base64\nimport logging\nimport subprocess\nimport pipes\nimport os\nfrom bd2k.util.exceptions import require\n\n_logger = logging.getLogger(__name__)\n\n\ndef dockerCall(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n defer=None):\n \"\"\"\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n \"\"\"\n _docker(job, tool=tool, parameters=parameters, workDir=workDir, dockerParameters=dockerParameters,\n outfile=outfile, checkOutput=False, defer=defer)\n\n\ndef dockerCheckOutput(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n defer=None):\n \"\"\"\n Returns the stdout from the Docker invocation (via subprocess.check_output)\n Throws CalledProcessorError if the Docker invocation returns a non-zero exit code\n This function blocks until the subprocess call to Docker returns\n\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools:latest).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. 
Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. This is the default behavior if defer is set to None.\n :returns: Stdout from the docker call\n :rtype: str\n \"\"\"\n return _docker(job, tool=tool, parameters=parameters, workDir=workDir,\n dockerParameters=dockerParameters, checkOutput=True, defer=defer)\n\n\ndef _docker(job,\n tool,\n parameters=None,\n workDir=None,\n dockerParameters=None,\n outfile=None,\n checkOutput=False,\n defer=None):\n \"\"\"\n :param toil.Job.job job: The Job instance for the calling function.\n :param str tool: Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools).\n :param list[str] parameters: Command line arguments to be passed to the tool.\n If list of lists: list[list[str]], then treat as successive commands chained with pipe.\n :param str workDir: Directory to mount into the container via `-v`. Destination convention is /data\n :param list[str] dockerParameters: Parameters to pass to Docker. Default parameters are `--rm`,\n `--log-driver none`, and the mountpoint `-v work_dir:/data` where /data is the destination convention.\n These defaults are removed if docker_parmaters is passed, so be sure to pass them if they are desired.\n :param file outfile: Pipe output of Docker call to file handle\n :param bool checkOutput: When True, this function returns docker's output.\n :param int defer: What action should be taken on the container upon job completion?\n FORGO (0) will leave the container untouched.\n STOP (1) will attempt to stop the container with `docker stop` (useful for debugging).\n RM (2) will stop the container and then forcefully remove it from the system\n using `docker rm -f`. 
This is the default behavior if defer is set to None.\n \"\"\"\n if parameters is None:\n parameters = []\n if workDir is None:\n workDir = os.getcwd()\n\n # Setup the outgoing subprocess call for docker\n baseDockerCall = ['docker', 'run']\n if dockerParameters:\n baseDockerCall += dockerParameters\n else:\n baseDockerCall += ['--rm', '--log-driver', 'none', '-v',\n os.path.abspath(workDir) + ':/data']\n\n # Ensure the user has passed a valid value for defer\n require(defer in (None, FORGO, STOP, RM),\n 'Please provide a valid value for defer.')\n\n # Get container name which is needed for _dockerKill\n try:\n if any('--name' in x for x in baseDockerCall):\n if any('--name=' in x for x in baseDockerCall):\n containerName = [x.split('=')[1] for x in baseDockerCall if '--name' in x][0]\n else:\n containerName = baseDockerCall[baseDockerCall.index('--name') + 1]\n else:\n containerName = _getContainerName(job)\n except ValueError:\n containerName = _getContainerName(job)\n baseDockerCall.extend(['--name', containerName])\n except IndexError:\n raise RuntimeError(\"Couldn't parse Docker's `--name=` option, check parameters: \" + str(dockerParameters))\n\n # Defer the container on-exit action\n if '--rm' in baseDockerCall and defer is None:\n defer = RM\n if '--rm' in baseDockerCall and defer is not RM:\n _logger.warn('--rm being passed to docker call but defer not set to dockerCall.RM, defer set to: ' + str(defer))\n job.defer(_dockerKill, containerName, action=defer)\n # Defer the permission fixing function which will run after this job concludes.\n # We call this explicitly later on in this function, but we defer it as well to handle unexpected job failure.\n job.defer(_fixPermissions, tool, workDir)\n\n # Make subprocess call\n\n # If parameters is list of lists, treat each list as separate command and chain with pipes\n if len(parameters) > 0 and type(parameters[0]) is list:\n # When piping, all arguments now get merged into a single string to bash -c.\n # We try to support spaces in paths by wrapping them all in quotes first.\n chain_params = [' '.join(p) for p in [map(pipes.quote, q) for q in parameters]]\n call = baseDockerCall + ['--entrypoint', '/bin/bash', tool, '-c', ' | '.join(chain_params)]\n else:\n call = baseDockerCall + [tool] + parameters\n _logger.info(\"Calling docker with \" + repr(call))\n\n if outfile:\n subprocess.check_call(call, stdout=outfile)\n else:\n if checkOutput:\n return subprocess.check_output(call)\n else:\n subprocess.check_call(call)\n\n\nFORGO = 0\nSTOP = 1\nRM = 2\n\n\ndef _dockerKill(containerName, action):\n \"\"\"\n Kills the specified container.\n :param str containerName: The name of the container created by docker_call\n :param int action: What action should be taken on the container? See `defer=` in\n :func:`docker_call`\n \"\"\"\n running = _containerIsRunning(containerName)\n if running is None:\n # This means that the container doesn't exist. We will see this if the container was run\n # with --rm and has already exited before this call.\n _logger.info('The container with name \"%s\" appears to have already been removed. Nothing to '\n 'do.', containerName)\n else:\n if action in (None, FORGO):\n _logger.info('The container with name %s continues to exist as we were asked to forgo a '\n 'post-job action on it.', containerName)\n else:\n _logger.info('The container with name %s exists. 
Running user-specified defer functions.',\n containerName)\n if running and action >= STOP:\n _logger.info('Stopping container \"%s\".', containerName)\n subprocess.check_call(['docker', 'stop', containerName])\n else:\n _logger.info('The container \"%s\" was not found to be running.', containerName)\n if action >= RM:\n # If the container was run with --rm, then stop will most likely remove the\n # container. We first check if it is running then remove it.\n running = _containerIsRunning(containerName)\n if running is not None:\n _logger.info('Removing container \"%s\".', containerName)\n subprocess.check_call(['docker', 'rm', '-f', containerName])\n else:\n _logger.info('The container \"%s\" was not found on the system. Nothing to remove.',\n containerName)\n\n\ndef _fixPermissions(tool, workDir):\n \"\"\"\n Fix permission of a mounted Docker directory by reusing the tool to change ownership.\n Docker natively runs as a root inside the container, and files written to the\n mounted directory are implicitly owned by root.\n\n :param list baseDockerCall: Docker run parameters\n :param str tool: Name of tool\n :param str workDir: Path of work directory to recursively chown\n \"\"\"\n baseDockerCall = ['docker', 'run', '--log-driver=none',\n '-v', os.path.abspath(workDir) + ':/data', '--rm', '--entrypoint=chown']\n stat = os.stat(workDir)\n command = baseDockerCall + [tool] + ['-R', '{}:{}'.format(stat.st_uid, stat.st_gid), '/data']\n subprocess.check_call(command)\n\n\ndef _getContainerName(job):\n return '--'.join([str(job),\n base64.b64encode(os.urandom(9), '-_')]).replace(\"'\", '').replace('_', '')\n\n\ndef _containerIsRunning(container_name):\n \"\"\"\n Checks whether the container is running or not.\n :param container_name: Name of the container being checked.\n :returns: True if running, False if not running, None if the container doesn't exist.\n :rtype: bool\n \"\"\"\n try:\n output = subprocess.check_output(['docker', 'inspect', '--format', '{{.State.Running}}',\n container_name]).strip()\n except subprocess.CalledProcessError:\n # This will be raised if the container didn't exist.\n _logger.debug(\"'docker inspect' failed. Assuming container %s doesn't exist.\", container_name,\n exc_info=True)\n return None\n if output == 'true':\n return True\n elif output == 'false':\n return False\n else:\n raise RuntimeError(\"Got unexpected value for State.Running (%s)\" % output)\n", "path": "src/toil/lib/docker.py" } ]
diff --git a/src/toil/lib/docker.py b/src/toil/lib/docker.py index c4b918c6ec..19d8a6f9b6 100644 --- a/src/toil/lib/docker.py +++ b/src/toil/lib/docker.py @@ -232,7 +232,6 @@ def _fixPermissions(tool, workDir): def _getContainerName(job): return '--'.join([str(job), - job.fileStore.jobID, base64.b64encode(os.urandom(9), '-_')]).replace("'", '').replace('_', '') diff --git a/src/toil/test/lib/dockerTest.py b/src/toil/test/lib/dockerTest.py index 1b951f9458..66537c26f4 100644 --- a/src/toil/test/lib/dockerTest.py +++ b/src/toil/test/lib/dockerTest.py @@ -30,7 +30,7 @@ class DockerTest(ToilTest): def setUp(self): self.tempDir = self._createTempDir(purpose='tempDir') - def testDockerClean(self): + def testDockerClean(self, caching=True): """ Run the test container that creates a file in the work dir, and sleeps for 5 minutes. Ensure that the calling job gets SIGKILLed after a minute, leaving behind the spooky/ghost/zombie @@ -57,6 +57,8 @@ def testDockerClean(self): options.logLevel = 'INFO' options.workDir = work_dir options.clean = 'always' + if not caching: + options.disableCaching = True for rm in (True, False): for detached in (True, False): if detached and rm: @@ -64,7 +66,6 @@ def testDockerClean(self): for defer in (FORGO, STOP, RM, None): # Not using base64 logic here since it might create a name starting with a `-`. container_name = uuid.uuid4().hex - print rm, detached, defer A = Job.wrapJobFn(_testDockerCleanFn, data_dir, detached, rm, defer, container_name) try: @@ -94,7 +95,7 @@ def testDockerClean(self): _dockerKill(container_name, RM) os.remove(test_file) - def testDockerPipeChain(self): + def testDockerPipeChain(self, caching=True): """ Test for piping API for dockerCall(). Using this API (activated when list of argument lists is given as parameters), commands a piped together into a chain @@ -105,10 +106,18 @@ def testDockerPipeChain(self): options.logLevel = 'INFO' options.workDir = self.tempDir options.clean = 'always' + if not caching: + options.disableCaching = True A = Job.wrapJobFn(_testDockerPipeChainFn) rv = Job.Runner.startToil(A, options) assert rv.strip() == '2' + def testNonCachingDockerChain(self): + self.testDockerPipeChain(caching=False) + + def testNonCachingDockerClean(self): + self.testDockerClean(caching=False) + def _testDockerCleanFn(job, workDir, detached=None, rm=None, defer=None, containerName=None): """ Test function for test docker_clean. Runs a container with given flags and then dies leaving
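For context on the `_getContainerName` change in the diff above: after dropping `job.fileStore.jobID`, the container name is just the stringified job joined to a random, URL-safe base64 suffix, with quotes and underscores stripped so the result is a valid Docker name. The sketch below is a Python 3 spelling of that scheme (the Toil code above is Python 2, so it passes string arguments to `b64encode`); the example job string is illustrative.

```python
import base64
import os


def container_name(job_str: str) -> str:
    # 9 random bytes -> 12 base64 characters, using "-" and "_" as the
    # alternative characters; quotes and underscores are then stripped,
    # mirroring _getContainerName in the diff above.
    suffix = base64.b64encode(os.urandom(9), altchars=b"-_").decode("ascii")
    return "--".join([job_str, suffix]).replace("'", "").replace("_", "")


print(container_name("wrapJobFn-samtools"))  # e.g. wrapJobFn-samtools--3qCkrwGeeQw
```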
elastic__apm-agent-python-1397
No module named 'elasticapm.metrics.sets.transactions'
fastapi: 0.70.0
elastic-apm: 6.6.2

Could not register elasticapm.metrics.sets.transactions.TransactionsMetricSet metricset: No module named 'elasticapm.metrics.sets.transactions'

```
File "elasticapm/contrib/starlette/__init__.py", line 70, in make_apm_client
    return client_cls(config, **defaults)
  File "elasticapm/base.py", line 199, in __init__
    self._metrics.register(path)
  File "elasticapm/metrics/base_metrics.py", line 72, in register
    logger.warning("Could not register %s metricset: %s", class_path, compat.text_type(e))
  File "__init__.py", line 1480, in warning
    self._log(WARNING, msg, args, **kwargs)
  File "__init__.py", line 1615, in _log
    self.handle(record)
  File "__init__.py", line 1625, in handle
    self.callHandlers(record)
  File "__init__.py", line 967, in handle
    self.emit(record)
```
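The stale default comes from the `METRICS_SETS` option in `elasticapm/conf/__init__.py` (serialized below), which still lists `elasticapm.metrics.sets.transactions.TransactionsMetricSet` even though that module no longer ships; the warning is noisy but non-fatal. The upstream fix (visible in the after-files for this record) simply drops the stale entry from the defaults. As a hedged workaround sketch, not the upstream fix, the metric sets can be overridden explicitly when building the client; the service name here is illustrative.

```python
from elasticapm.contrib.starlette import make_apm_client

apm = make_apm_client(
    {
        "SERVICE_NAME": "my-fastapi-service",  # illustrative name
        # List only metric sets that still exist in 6.6.x, so the agent never
        # tries to import the removed transactions module.
        "METRICS_SETS": ["elasticapm.metrics.sets.cpu.CPUMetricSet"],
    }
)
```

The same override can also be expressed through the environment, since the config key maps to `ELASTIC_APM_METRICS_SETS` per the config machinery shown below.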
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport logging.handlers\nimport math\nimport os\nimport re\nimport socket\nimport threading\n\nfrom elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES\nfrom elasticapm.utils import compat, starmatch_to_regex\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.threading import IntervalTimer, ThreadManager\n\n__all__ = (\"setup_logging\", \"Config\")\n\n\nlogger = get_logger(\"elasticapm.conf\")\n\nlog_levels_map = {\n \"trace\": 5,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"warn\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n \"off\": 1000,\n}\nlogfile_set_up = False\n\n\nclass ConfigurationError(ValueError):\n def __init__(self, msg, field_name):\n self.field_name = field_name\n super(ValueError, self).__init__(msg)\n\n\nclass _ConfigValue(object):\n \"\"\"\n Base class for configuration values\n\n dict_key\n String representing the key used for this config value in dict configs.\n env_key\n String representing the key used in environment variables for this\n config value. If not specified, will be set to `\"ELASTIC_APM_\" + dict_key`.\n type\n Type of value stored in this config value.\n validators\n List of validator classes. Must be callables, which will be called with\n a value and the dict_key for the config value. The validator either\n returns the validated value or raises a ConfigurationError if validation\n fails.\n callbacks\n List of functions which will be called when the config value is updated.\n The callbacks must match this signature:\n callback(dict_key, old_value, new_value, config_instance)\n\n Note that callbacks wait until the end of any given `update()` operation\n and are called at this point. This, coupled with the fact that callbacks\n receive the config instance, means that callbacks can utilize multiple\n configuration values (such as is the case for logging). 
This is\n complicated if more than one of the involved config values are\n dynamic, as both would need callbacks and the callback would need to\n be idempotent.\n callbacks_on_default\n Whether the callback should be called on config initialization if the\n default value is used. Default: True\n default\n The default for this config value if not user-configured.\n required\n Whether this config value is required. If a default is specified,\n this is a redundant option (except to ensure that this config value\n is specified if a default were ever to be removed).\n\n Note that _ConfigValues and any inheriting classes must implement __set__\n and __get__. The calling instance will always be a _ConfigBase descendant\n and the __set__ and __get__ calls will access `instance._values[self.dict_key]`\n to get and set values.\n \"\"\"\n\n def __init__(\n self,\n dict_key,\n env_key=None,\n type=compat.text_type,\n validators=None,\n callbacks=None,\n callbacks_on_default=True,\n default=None,\n required=False,\n ):\n self.type = type\n self.dict_key = dict_key\n self.validators = validators\n self.callbacks = callbacks\n self.default = default\n self.required = required\n if env_key is None:\n env_key = \"ELASTIC_APM_\" + dict_key\n self.env_key = env_key\n self.callbacks_on_default = callbacks_on_default\n\n def __get__(self, instance, owner):\n if instance:\n return instance._values.get(self.dict_key, self.default)\n else:\n return self.default\n\n def __set__(self, config_instance, value):\n value = self._validate(config_instance, value)\n self._callback_if_changed(config_instance, value)\n config_instance._values[self.dict_key] = value\n\n def _validate(self, instance, value):\n if value is None and self.required:\n raise ConfigurationError(\n \"Configuration error: value for {} is required.\".format(self.dict_key), self.dict_key\n )\n if self.validators and value is not None:\n for validator in self.validators:\n value = validator(value, self.dict_key)\n if self.type and value is not None:\n try:\n value = self.type(value)\n except ValueError as e:\n raise ConfigurationError(\"{}: {}\".format(self.dict_key, compat.text_type(e)), self.dict_key)\n instance._errors.pop(self.dict_key, None)\n return value\n\n def _callback_if_changed(self, instance, new_value):\n \"\"\"\n If the value changed (checked against instance._values[self.dict_key]),\n then run the callback function (if defined)\n \"\"\"\n old_value = instance._values.get(self.dict_key, self.default)\n if old_value != new_value:\n instance.callbacks_queue.append((self.dict_key, old_value, new_value))\n\n def call_callbacks(self, old_value, new_value, config_instance):\n if not self.callbacks:\n return\n for callback in self.callbacks:\n try:\n callback(self.dict_key, old_value, new_value, config_instance)\n except Exception as e:\n raise ConfigurationError(\n \"Callback {} raised an exception when setting {} to {}: {}\".format(\n callback, self.dict_key, new_value, e\n ),\n self.dict_key,\n )\n\n\nclass _ListConfigValue(_ConfigValue):\n def __init__(self, dict_key, list_separator=\",\", **kwargs):\n self.list_separator = list_separator\n super(_ListConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n value = value.split(self.list_separator)\n elif value is not None:\n value = list(value)\n if value:\n value = [self.type(item) for item in value]\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass 
_DictConfigValue(_ConfigValue):\n def __init__(self, dict_key, item_separator=\",\", keyval_separator=\"=\", **kwargs):\n self.item_separator = item_separator\n self.keyval_separator = keyval_separator\n super(_DictConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))\n value = {key.strip(): self.type(val.strip()) for key, val in items}\n elif not isinstance(value, dict):\n # TODO: better error handling\n value = None\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass _BoolConfigValue(_ConfigValue):\n def __init__(self, dict_key, true_string=\"true\", false_string=\"false\", **kwargs):\n self.true_string = true_string\n self.false_string = false_string\n super(_BoolConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n if value.lower() == self.true_string:\n value = True\n elif value.lower() == self.false_string:\n value = False\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = bool(value)\n\n\nclass RegexValidator(object):\n def __init__(self, regex, verbose_pattern=None):\n self.regex = regex\n self.verbose_pattern = verbose_pattern or regex\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value)\n if match:\n return value\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n\n\nclass UnitValidator(object):\n def __init__(self, regex, verbose_pattern, unit_multipliers):\n self.regex = regex\n self.verbose_pattern = verbose_pattern\n self.unit_multipliers = unit_multipliers\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value, re.IGNORECASE)\n if not match:\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n val, unit = match.groups()\n try:\n val = int(val) * self.unit_multipliers[unit]\n except KeyError:\n raise ConfigurationError(\"{} is not a supported unit\".format(unit), field_name)\n return val\n\n\nclass PrecisionValidator(object):\n \"\"\"\n Forces a float value to `precision` digits of precision.\n\n Rounds half away from zero.\n\n If `minimum` is provided, and the value rounds to 0 (but was not zero to\n begin with), use the minimum instead.\n \"\"\"\n\n def __init__(self, precision=0, minimum=None):\n self.precision = precision\n self.minimum = minimum\n\n def __call__(self, value, field_name):\n try:\n value = float(value)\n except ValueError:\n raise ConfigurationError(\"{} is not a float\".format(value), field_name)\n multiplier = 10 ** self.precision\n rounded = math.floor(value * multiplier + 0.5) / multiplier\n if rounded == 0 and self.minimum and value != 0:\n rounded = self.minimum\n return rounded\n\n\nduration_validator = UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000})\nsize_validator = UnitValidator(\n r\"^(\\d+)(b|kb|mb|gb)$\", r\"\\d+(b|KB|MB|GB)\", {\"b\": 1, \"kb\": 1024, \"mb\": 1024 * 1024, \"gb\": 1024 * 1024 * 1024}\n)\n\n\nclass ExcludeRangeValidator(object):\n def __init__(self, range_start, range_end, range_desc):\n self.range_start = range_start\n self.range_end = range_end\n self.range_desc = range_desc\n\n def __call__(self, value, field_name):\n 
if self.range_start <= value <= self.range_end:\n raise ConfigurationError(\n \"{} cannot be in range: {}\".format(\n value, self.range_desc.format(**{\"range_start\": self.range_start, \"range_end\": self.range_end})\n ),\n field_name,\n )\n return value\n\n\nclass FileIsReadableValidator(object):\n def __call__(self, value, field_name):\n value = os.path.normpath(value)\n if not os.path.exists(value):\n raise ConfigurationError(\"{} does not exist\".format(value), field_name)\n elif not os.path.isfile(value):\n raise ConfigurationError(\"{} is not a file\".format(value), field_name)\n elif not os.access(value, os.R_OK):\n raise ConfigurationError(\"{} is not readable\".format(value), field_name)\n return value\n\n\nclass EnumerationValidator(object):\n \"\"\"\n Validator which ensures that a given config value is chosen from a list\n of valid string options.\n \"\"\"\n\n def __init__(self, valid_values, case_sensitive=False):\n \"\"\"\n valid_values\n List of valid string values for the config value\n case_sensitive\n Whether to compare case when comparing a value to the valid list.\n Defaults to False (case-insensitive)\n \"\"\"\n self.case_sensitive = case_sensitive\n if case_sensitive:\n self.valid_values = {s: s for s in valid_values}\n else:\n self.valid_values = {s.lower(): s for s in valid_values}\n\n def __call__(self, value, field_name):\n if self.case_sensitive:\n ret = self.valid_values.get(value)\n else:\n ret = self.valid_values.get(value.lower())\n if ret is None:\n raise ConfigurationError(\n \"{} is not in the list of valid values: {}\".format(value, list(self.valid_values.values())), field_name\n )\n return ret\n\n\ndef _log_level_callback(dict_key, old_value, new_value, config_instance):\n elasticapm_logger = logging.getLogger(\"elasticapm\")\n elasticapm_logger.setLevel(log_levels_map.get(new_value, 100))\n\n global logfile_set_up\n if not logfile_set_up and config_instance.log_file:\n logfile_set_up = True\n filehandler = logging.handlers.RotatingFileHandler(\n config_instance.log_file, maxBytes=config_instance.log_file_size, backupCount=1\n )\n try:\n import ecs_logging\n\n filehandler.setFormatter(ecs_logging.StdlibFormatter())\n except ImportError:\n pass\n elasticapm_logger.addHandler(filehandler)\n\n\ndef _log_ecs_reformatting_callback(dict_key, old_value, new_value, config_instance):\n \"\"\"\n If ecs_logging is installed and log_ecs_reformatting is set to \"override\", we should\n set the ecs_logging.StdlibFormatter as the formatted for every handler in\n the root logger, and set the default processor for structlog to the\n ecs_logging.StructlogFormatter.\n \"\"\"\n if new_value.lower() == \"override\":\n try:\n import ecs_logging\n except ImportError:\n return\n\n # Stdlib\n root_logger = logging.getLogger()\n formatter = ecs_logging.StdlibFormatter()\n for handler in root_logger.handlers:\n handler.setFormatter(formatter)\n\n # Structlog\n try:\n import structlog\n\n structlog.configure(processors=[ecs_logging.StructlogFormatter()])\n except ImportError:\n pass\n\n\nclass _ConfigBase(object):\n _NO_VALUE = object() # sentinel object\n\n def __init__(self, config_dict=None, env_dict=None, inline_dict=None, copy=False):\n \"\"\"\n config_dict\n Configuration dict as is common for frameworks such as flask and django.\n Keys match the _ConfigValue.dict_key (usually all caps)\n env_dict\n Environment variables dict. Keys match the _ConfigValue.env_key\n (usually \"ELASTIC_APM_\" + dict_key)\n inline_dict\n Any config passed in as kwargs to the Client object. 
Typically\n the keys match the names of the _ConfigValue variables in the Config\n object.\n copy\n Whether this object is being created to copy an existing Config\n object. If True, don't run the initial `update` (which would call\n callbacks if present)\n \"\"\"\n self._values = {}\n self._errors = {}\n self._dict_key_lookup = {}\n self.callbacks_queue = []\n for config_value in self.__class__.__dict__.values():\n if not isinstance(config_value, _ConfigValue):\n continue\n self._dict_key_lookup[config_value.dict_key] = config_value\n if not copy:\n self.update(config_dict, env_dict, inline_dict, initial=True)\n\n def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):\n if config_dict is None:\n config_dict = {}\n if env_dict is None:\n env_dict = os.environ\n if inline_dict is None:\n inline_dict = {}\n for field, config_value in compat.iteritems(self.__class__.__dict__):\n if not isinstance(config_value, _ConfigValue):\n continue\n new_value = self._NO_VALUE\n # first check environment\n if config_value.env_key and config_value.env_key in env_dict:\n new_value = env_dict[config_value.env_key]\n # check the inline config\n elif field in inline_dict:\n new_value = inline_dict[field]\n # finally, check config dictionary\n elif config_value.dict_key in config_dict:\n new_value = config_dict[config_value.dict_key]\n # only set if new_value changed. We'll fall back to the field default if not.\n if new_value is not self._NO_VALUE:\n try:\n setattr(self, field, new_value)\n except ConfigurationError as e:\n self._errors[e.field_name] = str(e)\n # handle initial callbacks\n if (\n initial\n and config_value.callbacks_on_default\n and getattr(self, field) is not None\n and getattr(self, field) == config_value.default\n ):\n self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))\n # if a field has not been provided by any config source, we have to check separately if it is required\n if config_value.required and getattr(self, field) is None:\n self._errors[config_value.dict_key] = \"Configuration error: value for {} is required.\".format(\n config_value.dict_key\n )\n self.call_pending_callbacks()\n\n def call_pending_callbacks(self):\n \"\"\"\n Call callbacks for config options matching list of tuples:\n\n (dict_key, old_value, new_value)\n \"\"\"\n for dict_key, old_value, new_value in self.callbacks_queue:\n self._dict_key_lookup[dict_key].call_callbacks(old_value, new_value, self)\n self.callbacks_queue = []\n\n @property\n def values(self):\n return self._values\n\n @values.setter\n def values(self, values):\n self._values = values\n\n @property\n def errors(self):\n return self._errors\n\n def copy(self):\n c = self.__class__(copy=True)\n c._errors = {}\n c.values = self.values.copy()\n return c\n\n\nclass Config(_ConfigBase):\n service_name = _ConfigValue(\n \"SERVICE_NAME\", validators=[RegexValidator(\"^[a-zA-Z0-9 _-]+$\")], default=\"python_service\", required=True\n )\n service_node_name = _ConfigValue(\"SERVICE_NODE_NAME\")\n environment = _ConfigValue(\"ENVIRONMENT\")\n secret_token = _ConfigValue(\"SECRET_TOKEN\")\n api_key = _ConfigValue(\"API_KEY\")\n debug = _BoolConfigValue(\"DEBUG\", default=False)\n server_url = _ConfigValue(\"SERVER_URL\", default=\"http://localhost:8200\", required=True)\n server_cert = _ConfigValue(\"SERVER_CERT\", validators=[FileIsReadableValidator()])\n verify_server_cert = _BoolConfigValue(\"VERIFY_SERVER_CERT\", default=True)\n use_certifi = _BoolConfigValue(\"USE_CERTIFI\", 
default=True)\n include_paths = _ListConfigValue(\"INCLUDE_PATHS\")\n exclude_paths = _ListConfigValue(\"EXCLUDE_PATHS\", default=compat.get_default_library_patters())\n filter_exception_types = _ListConfigValue(\"FILTER_EXCEPTION_TYPES\")\n server_timeout = _ConfigValue(\n \"SERVER_TIMEOUT\",\n type=float,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 0.001, \"s\": 1, \"m\": 60, None: 1000})\n ],\n default=5,\n )\n hostname = _ConfigValue(\"HOSTNAME\", default=socket.gethostname())\n auto_log_stacks = _BoolConfigValue(\"AUTO_LOG_STACKS\", default=True)\n transport_class = _ConfigValue(\"TRANSPORT_CLASS\", default=\"elasticapm.transport.http.Transport\", required=True)\n processors = _ListConfigValue(\n \"PROCESSORS\",\n default=[\n \"elasticapm.processors.sanitize_stacktrace_locals\",\n \"elasticapm.processors.sanitize_http_request_cookies\",\n \"elasticapm.processors.sanitize_http_response_cookies\",\n \"elasticapm.processors.sanitize_http_headers\",\n \"elasticapm.processors.sanitize_http_wsgi_env\",\n \"elasticapm.processors.sanitize_http_request_body\",\n ],\n )\n sanitize_field_names = _ListConfigValue(\n \"SANITIZE_FIELD_NAMES\", type=starmatch_to_regex, default=BASE_SANITIZE_FIELD_NAMES\n )\n metrics_sets = _ListConfigValue(\n \"METRICS_SETS\",\n default=[\n \"elasticapm.metrics.sets.cpu.CPUMetricSet\",\n \"elasticapm.metrics.sets.transactions.TransactionsMetricSet\",\n ],\n )\n metrics_interval = _ConfigValue(\n \"METRICS_INTERVAL\",\n type=int,\n validators=[duration_validator, ExcludeRangeValidator(1, 999, \"{range_start} - {range_end} ms\")],\n default=30000,\n )\n breakdown_metrics = _BoolConfigValue(\"BREAKDOWN_METRICS\", default=True)\n prometheus_metrics = _BoolConfigValue(\"PROMETHEUS_METRICS\", default=False)\n prometheus_metrics_prefix = _ConfigValue(\"PROMETHEUS_METRICS_PREFIX\", default=\"prometheus.metrics.\")\n disable_metrics = _ListConfigValue(\"DISABLE_METRICS\", type=starmatch_to_regex, default=[])\n central_config = _BoolConfigValue(\"CENTRAL_CONFIG\", default=True)\n api_request_size = _ConfigValue(\"API_REQUEST_SIZE\", type=int, validators=[size_validator], default=768 * 1024)\n api_request_time = _ConfigValue(\"API_REQUEST_TIME\", type=int, validators=[duration_validator], default=10 * 1000)\n transaction_sample_rate = _ConfigValue(\n \"TRANSACTION_SAMPLE_RATE\", type=float, validators=[PrecisionValidator(4, 0.0001)], default=1.0\n )\n transaction_max_spans = _ConfigValue(\"TRANSACTION_MAX_SPANS\", type=int, default=500)\n stack_trace_limit = _ConfigValue(\"STACK_TRACE_LIMIT\", type=int, default=500)\n span_frames_min_duration = _ConfigValue(\n \"SPAN_FRAMES_MIN_DURATION\",\n default=5,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000, None: 1})\n ],\n type=int,\n )\n span_compression_exact_match_max_duration = _ConfigValue(\n \"span_compression_exact_match_max_duration\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n span_compression_same_kind_max_duration = _ConfigValue(\n \"span_compression_exact_match_max_duration\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n collect_local_variables = _ConfigValue(\"COLLECT_LOCAL_VARIABLES\", default=\"errors\")\n source_lines_error_app_frames = _ConfigValue(\"SOURCE_LINES_ERROR_APP_FRAMES\", type=int, default=5)\n source_lines_error_library_frames = _ConfigValue(\"SOURCE_LINES_ERROR_LIBRARY_FRAMES\", type=int, default=5)\n source_lines_span_app_frames = 
_ConfigValue(\"SOURCE_LINES_SPAN_APP_FRAMES\", type=int, default=0)\n source_lines_span_library_frames = _ConfigValue(\"SOURCE_LINES_SPAN_LIBRARY_FRAMES\", type=int, default=0)\n local_var_max_length = _ConfigValue(\"LOCAL_VAR_MAX_LENGTH\", type=int, default=200)\n local_var_list_max_length = _ConfigValue(\"LOCAL_VAR_LIST_MAX_LENGTH\", type=int, default=10)\n local_var_dict_max_length = _ConfigValue(\"LOCAL_VAR_DICT_MAX_LENGTH\", type=int, default=10)\n capture_body = _ConfigValue(\n \"CAPTURE_BODY\",\n default=\"off\",\n validators=[lambda val, _: {\"errors\": \"error\", \"transactions\": \"transaction\"}.get(val, val)],\n )\n async_mode = _BoolConfigValue(\"ASYNC_MODE\", default=True)\n instrument_django_middleware = _BoolConfigValue(\"INSTRUMENT_DJANGO_MIDDLEWARE\", default=True)\n autoinsert_django_middleware = _BoolConfigValue(\"AUTOINSERT_DJANGO_MIDDLEWARE\", default=True)\n transactions_ignore_patterns = _ListConfigValue(\"TRANSACTIONS_IGNORE_PATTERNS\", default=[])\n transaction_ignore_urls = _ListConfigValue(\"TRANSACTION_IGNORE_URLS\", type=starmatch_to_regex, default=[])\n service_version = _ConfigValue(\"SERVICE_VERSION\")\n framework_name = _ConfigValue(\"FRAMEWORK_NAME\")\n framework_version = _ConfigValue(\"FRAMEWORK_VERSION\")\n global_labels = _DictConfigValue(\"GLOBAL_LABELS\")\n disable_send = _BoolConfigValue(\"DISABLE_SEND\", default=False)\n enabled = _BoolConfigValue(\"ENABLED\", default=True)\n recording = _BoolConfigValue(\"RECORDING\", default=True)\n instrument = _BoolConfigValue(\"INSTRUMENT\", default=True)\n enable_distributed_tracing = _BoolConfigValue(\"ENABLE_DISTRIBUTED_TRACING\", default=True)\n capture_headers = _BoolConfigValue(\"CAPTURE_HEADERS\", default=True)\n django_transaction_name_from_route = _BoolConfigValue(\"DJANGO_TRANSACTION_NAME_FROM_ROUTE\", default=False)\n disable_log_record_factory = _BoolConfigValue(\"DISABLE_LOG_RECORD_FACTORY\", default=False)\n use_elastic_traceparent_header = _BoolConfigValue(\"USE_ELASTIC_TRACEPARENT_HEADER\", default=True)\n use_elastic_excepthook = _BoolConfigValue(\"USE_ELASTIC_EXCEPTHOOK\", default=False)\n cloud_provider = _ConfigValue(\"CLOUD_PROVIDER\", default=True)\n log_level = _ConfigValue(\n \"LOG_LEVEL\",\n validators=[EnumerationValidator([\"trace\", \"debug\", \"info\", \"warning\", \"warn\", \"error\", \"critical\", \"off\"])],\n callbacks=[_log_level_callback],\n )\n log_file = _ConfigValue(\"LOG_FILE\", default=\"\")\n log_file_size = _ConfigValue(\"LOG_FILE_SIZE\", validators=[size_validator], type=int, default=50 * 1024 * 1024)\n log_ecs_reformatting = _ConfigValue(\n \"LOG_ECS_REFORMATTING\",\n validators=[EnumerationValidator([\"off\", \"override\"])],\n callbacks=[_log_ecs_reformatting_callback],\n default=\"off\",\n )\n\n @property\n def is_recording(self):\n if not self.enabled:\n return False\n else:\n return self.recording\n\n\nclass VersionedConfig(ThreadManager):\n \"\"\"\n A thin layer around Config that provides versioning\n \"\"\"\n\n __slots__ = (\n \"_config\",\n \"_version\",\n \"_first_config\",\n \"_first_version\",\n \"_lock\",\n \"transport\",\n \"_update_thread\",\n \"pid\",\n \"start_stop_order\",\n )\n\n def __init__(self, config_object, version, transport=None):\n \"\"\"\n Create a new VersionedConfig with an initial Config object\n :param config_object: the initial Config object\n :param version: a version identifier for the configuration\n \"\"\"\n self._config = self._first_config = config_object\n self._version = self._first_version = version\n self.transport = 
transport\n self._lock = threading.Lock()\n self._update_thread = None\n super(VersionedConfig, self).__init__()\n\n def update(self, version, **config):\n \"\"\"\n Update the configuration version\n :param version: version identifier for the new configuration\n :param config: a key/value map of new configuration\n :return: configuration errors, if any\n \"\"\"\n new_config = self._config.copy()\n\n # pass an empty env dict to ensure the environment doesn't get precedence\n new_config.update(inline_dict=config, env_dict={})\n if not new_config.errors:\n with self._lock:\n self._version = version\n self._config = new_config\n else:\n return new_config.errors\n\n def reset(self):\n \"\"\"\n Reset state to the original configuration\n\n Note that because ConfigurationValues can have callbacks, we need to\n note any differences between the original configuration and the most\n recent configuration and run any callbacks that might exist for those\n values.\n \"\"\"\n callbacks = []\n for key in compat.iterkeys(self._config.values):\n if key in self._first_config.values and self._config.values[key] != self._first_config.values[key]:\n callbacks.append((key, self._config.values[key], self._first_config.values[key]))\n\n with self._lock:\n self._version = self._first_version\n self._config = self._first_config\n\n self._config.callbacks_queue.extend(callbacks)\n self._config.call_pending_callbacks()\n\n @property\n def changed(self):\n return self._config != self._first_config\n\n def __getattr__(self, item):\n return getattr(self._config, item)\n\n def __setattr__(self, name, value):\n if name not in self.__slots__:\n setattr(self._config, name, value)\n else:\n super(VersionedConfig, self).__setattr__(name, value)\n\n @property\n def config_version(self):\n return self._version\n\n def update_config(self):\n if not self.transport:\n logger.warning(\"No transport set for config updates, skipping\")\n return\n logger.debug(\"Checking for new config...\")\n keys = {\"service\": {\"name\": self.service_name}}\n if self.environment:\n keys[\"service\"][\"environment\"] = self.environment\n new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)\n if new_version and new_config:\n errors = self.update(new_version, **new_config)\n if errors:\n logger.error(\"Error applying new configuration: %s\", repr(errors))\n else:\n logger.info(\n \"Applied new remote configuration: %s\",\n \"; \".join(\n \"%s=%s\" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)\n ),\n )\n elif new_version == self.config_version:\n logger.debug(\"Remote config unchanged\")\n elif not new_config and self.changed:\n logger.debug(\"Remote config disappeared, resetting to original\")\n self.reset()\n\n return next_run\n\n def start_thread(self, pid=None):\n self._update_thread = IntervalTimer(\n self.update_config, 1, \"eapm conf updater\", daemon=True, evaluate_function_interval=True\n )\n self._update_thread.start()\n super(VersionedConfig, self).start_thread(pid=pid)\n\n def stop_thread(self):\n if self._update_thread:\n self._update_thread.cancel()\n self._update_thread = None\n\n\ndef setup_logging(handler):\n \"\"\"\n Configures logging to pipe to Elastic APM.\n\n For a typical Python install:\n\n >>> from elasticapm.handlers.logging import LoggingHandler\n >>> client = ElasticAPM(...)\n >>> setup_logging(LoggingHandler(client))\n\n Within Django:\n\n >>> from elasticapm.contrib.django.handlers import LoggingHandler\n >>> setup_logging(LoggingHandler())\n\n 
Returns a boolean based on if logging was configured or not.\n \"\"\"\n # TODO We should probably revisit this. Does it make more sense as\n # a method within the Client class? The Client object could easily\n # pass itself into LoggingHandler and we could eliminate args altogether.\n logger = logging.getLogger()\n if handler.__class__ in map(type, logger.handlers):\n return False\n\n logger.addHandler(handler)\n\n return True\n", "path": "elasticapm/conf/__init__.py" } ]
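One detail of the file above worth calling out: `_ConfigBase.update()` resolves every option from three sources in a fixed order, environment variables first, then inline keyword arguments, then the framework config dict. A minimal sketch of that precedence, assuming the package is installed and using illustrative values:

```python
import os

from elasticapm.conf import Config

# Environment entries use the "ELASTIC_APM_" + dict_key spelling.
os.environ["ELASTIC_APM_SERVICE_NAME"] = "from-env"

cfg = Config(
    config_dict={"SERVICE_NAME": "from-config-dict"},  # framework-style keys
    inline_dict={"service_name": "from-inline"},       # Client(**kwargs) keys
)

print(cfg.service_name)  # "from-env": the environment wins over both dicts
```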
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport logging.handlers\nimport math\nimport os\nimport re\nimport socket\nimport threading\n\nfrom elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES\nfrom elasticapm.utils import compat, starmatch_to_regex\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.threading import IntervalTimer, ThreadManager\n\n__all__ = (\"setup_logging\", \"Config\")\n\n\nlogger = get_logger(\"elasticapm.conf\")\n\nlog_levels_map = {\n \"trace\": 5,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"warn\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n \"off\": 1000,\n}\nlogfile_set_up = False\n\n\nclass ConfigurationError(ValueError):\n def __init__(self, msg, field_name):\n self.field_name = field_name\n super(ValueError, self).__init__(msg)\n\n\nclass _ConfigValue(object):\n \"\"\"\n Base class for configuration values\n\n dict_key\n String representing the key used for this config value in dict configs.\n env_key\n String representing the key used in environment variables for this\n config value. If not specified, will be set to `\"ELASTIC_APM_\" + dict_key`.\n type\n Type of value stored in this config value.\n validators\n List of validator classes. Must be callables, which will be called with\n a value and the dict_key for the config value. The validator either\n returns the validated value or raises a ConfigurationError if validation\n fails.\n callbacks\n List of functions which will be called when the config value is updated.\n The callbacks must match this signature:\n callback(dict_key, old_value, new_value, config_instance)\n\n Note that callbacks wait until the end of any given `update()` operation\n and are called at this point. This, coupled with the fact that callbacks\n receive the config instance, means that callbacks can utilize multiple\n configuration values (such as is the case for logging). 
This is\n complicated if more than one of the involved config values are\n dynamic, as both would need callbacks and the callback would need to\n be idempotent.\n callbacks_on_default\n Whether the callback should be called on config initialization if the\n default value is used. Default: True\n default\n The default for this config value if not user-configured.\n required\n Whether this config value is required. If a default is specified,\n this is a redundant option (except to ensure that this config value\n is specified if a default were ever to be removed).\n\n Note that _ConfigValues and any inheriting classes must implement __set__\n and __get__. The calling instance will always be a _ConfigBase descendant\n and the __set__ and __get__ calls will access `instance._values[self.dict_key]`\n to get and set values.\n \"\"\"\n\n def __init__(\n self,\n dict_key,\n env_key=None,\n type=compat.text_type,\n validators=None,\n callbacks=None,\n callbacks_on_default=True,\n default=None,\n required=False,\n ):\n self.type = type\n self.dict_key = dict_key\n self.validators = validators\n self.callbacks = callbacks\n self.default = default\n self.required = required\n if env_key is None:\n env_key = \"ELASTIC_APM_\" + dict_key\n self.env_key = env_key\n self.callbacks_on_default = callbacks_on_default\n\n def __get__(self, instance, owner):\n if instance:\n return instance._values.get(self.dict_key, self.default)\n else:\n return self.default\n\n def __set__(self, config_instance, value):\n value = self._validate(config_instance, value)\n self._callback_if_changed(config_instance, value)\n config_instance._values[self.dict_key] = value\n\n def _validate(self, instance, value):\n if value is None and self.required:\n raise ConfigurationError(\n \"Configuration error: value for {} is required.\".format(self.dict_key), self.dict_key\n )\n if self.validators and value is not None:\n for validator in self.validators:\n value = validator(value, self.dict_key)\n if self.type and value is not None:\n try:\n value = self.type(value)\n except ValueError as e:\n raise ConfigurationError(\"{}: {}\".format(self.dict_key, compat.text_type(e)), self.dict_key)\n instance._errors.pop(self.dict_key, None)\n return value\n\n def _callback_if_changed(self, instance, new_value):\n \"\"\"\n If the value changed (checked against instance._values[self.dict_key]),\n then run the callback function (if defined)\n \"\"\"\n old_value = instance._values.get(self.dict_key, self.default)\n if old_value != new_value:\n instance.callbacks_queue.append((self.dict_key, old_value, new_value))\n\n def call_callbacks(self, old_value, new_value, config_instance):\n if not self.callbacks:\n return\n for callback in self.callbacks:\n try:\n callback(self.dict_key, old_value, new_value, config_instance)\n except Exception as e:\n raise ConfigurationError(\n \"Callback {} raised an exception when setting {} to {}: {}\".format(\n callback, self.dict_key, new_value, e\n ),\n self.dict_key,\n )\n\n\nclass _ListConfigValue(_ConfigValue):\n def __init__(self, dict_key, list_separator=\",\", **kwargs):\n self.list_separator = list_separator\n super(_ListConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n value = value.split(self.list_separator)\n elif value is not None:\n value = list(value)\n if value:\n value = [self.type(item) for item in value]\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass 
_DictConfigValue(_ConfigValue):\n def __init__(self, dict_key, item_separator=\",\", keyval_separator=\"=\", **kwargs):\n self.item_separator = item_separator\n self.keyval_separator = keyval_separator\n super(_DictConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))\n value = {key.strip(): self.type(val.strip()) for key, val in items}\n elif not isinstance(value, dict):\n # TODO: better error handling\n value = None\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass _BoolConfigValue(_ConfigValue):\n def __init__(self, dict_key, true_string=\"true\", false_string=\"false\", **kwargs):\n self.true_string = true_string\n self.false_string = false_string\n super(_BoolConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n if value.lower() == self.true_string:\n value = True\n elif value.lower() == self.false_string:\n value = False\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = bool(value)\n\n\nclass RegexValidator(object):\n def __init__(self, regex, verbose_pattern=None):\n self.regex = regex\n self.verbose_pattern = verbose_pattern or regex\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value)\n if match:\n return value\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n\n\nclass UnitValidator(object):\n def __init__(self, regex, verbose_pattern, unit_multipliers):\n self.regex = regex\n self.verbose_pattern = verbose_pattern\n self.unit_multipliers = unit_multipliers\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value, re.IGNORECASE)\n if not match:\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n val, unit = match.groups()\n try:\n val = int(val) * self.unit_multipliers[unit]\n except KeyError:\n raise ConfigurationError(\"{} is not a supported unit\".format(unit), field_name)\n return val\n\n\nclass PrecisionValidator(object):\n \"\"\"\n Forces a float value to `precision` digits of precision.\n\n Rounds half away from zero.\n\n If `minimum` is provided, and the value rounds to 0 (but was not zero to\n begin with), use the minimum instead.\n \"\"\"\n\n def __init__(self, precision=0, minimum=None):\n self.precision = precision\n self.minimum = minimum\n\n def __call__(self, value, field_name):\n try:\n value = float(value)\n except ValueError:\n raise ConfigurationError(\"{} is not a float\".format(value), field_name)\n multiplier = 10 ** self.precision\n rounded = math.floor(value * multiplier + 0.5) / multiplier\n if rounded == 0 and self.minimum and value != 0:\n rounded = self.minimum\n return rounded\n\n\nduration_validator = UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000})\nsize_validator = UnitValidator(\n r\"^(\\d+)(b|kb|mb|gb)$\", r\"\\d+(b|KB|MB|GB)\", {\"b\": 1, \"kb\": 1024, \"mb\": 1024 * 1024, \"gb\": 1024 * 1024 * 1024}\n)\n\n\nclass ExcludeRangeValidator(object):\n def __init__(self, range_start, range_end, range_desc):\n self.range_start = range_start\n self.range_end = range_end\n self.range_desc = range_desc\n\n def __call__(self, value, field_name):\n 
if self.range_start <= value <= self.range_end:\n raise ConfigurationError(\n \"{} cannot be in range: {}\".format(\n value, self.range_desc.format(**{\"range_start\": self.range_start, \"range_end\": self.range_end})\n ),\n field_name,\n )\n return value\n\n\nclass FileIsReadableValidator(object):\n def __call__(self, value, field_name):\n value = os.path.normpath(value)\n if not os.path.exists(value):\n raise ConfigurationError(\"{} does not exist\".format(value), field_name)\n elif not os.path.isfile(value):\n raise ConfigurationError(\"{} is not a file\".format(value), field_name)\n elif not os.access(value, os.R_OK):\n raise ConfigurationError(\"{} is not readable\".format(value), field_name)\n return value\n\n\nclass EnumerationValidator(object):\n \"\"\"\n Validator which ensures that a given config value is chosen from a list\n of valid string options.\n \"\"\"\n\n def __init__(self, valid_values, case_sensitive=False):\n \"\"\"\n valid_values\n List of valid string values for the config value\n case_sensitive\n Whether to compare case when comparing a value to the valid list.\n Defaults to False (case-insensitive)\n \"\"\"\n self.case_sensitive = case_sensitive\n if case_sensitive:\n self.valid_values = {s: s for s in valid_values}\n else:\n self.valid_values = {s.lower(): s for s in valid_values}\n\n def __call__(self, value, field_name):\n if self.case_sensitive:\n ret = self.valid_values.get(value)\n else:\n ret = self.valid_values.get(value.lower())\n if ret is None:\n raise ConfigurationError(\n \"{} is not in the list of valid values: {}\".format(value, list(self.valid_values.values())), field_name\n )\n return ret\n\n\ndef _log_level_callback(dict_key, old_value, new_value, config_instance):\n elasticapm_logger = logging.getLogger(\"elasticapm\")\n elasticapm_logger.setLevel(log_levels_map.get(new_value, 100))\n\n global logfile_set_up\n if not logfile_set_up and config_instance.log_file:\n logfile_set_up = True\n filehandler = logging.handlers.RotatingFileHandler(\n config_instance.log_file, maxBytes=config_instance.log_file_size, backupCount=1\n )\n try:\n import ecs_logging\n\n filehandler.setFormatter(ecs_logging.StdlibFormatter())\n except ImportError:\n pass\n elasticapm_logger.addHandler(filehandler)\n\n\ndef _log_ecs_reformatting_callback(dict_key, old_value, new_value, config_instance):\n \"\"\"\n If ecs_logging is installed and log_ecs_reformatting is set to \"override\", we should\n set the ecs_logging.StdlibFormatter as the formatted for every handler in\n the root logger, and set the default processor for structlog to the\n ecs_logging.StructlogFormatter.\n \"\"\"\n if new_value.lower() == \"override\":\n try:\n import ecs_logging\n except ImportError:\n return\n\n # Stdlib\n root_logger = logging.getLogger()\n formatter = ecs_logging.StdlibFormatter()\n for handler in root_logger.handlers:\n handler.setFormatter(formatter)\n\n # Structlog\n try:\n import structlog\n\n structlog.configure(processors=[ecs_logging.StructlogFormatter()])\n except ImportError:\n pass\n\n\nclass _ConfigBase(object):\n _NO_VALUE = object() # sentinel object\n\n def __init__(self, config_dict=None, env_dict=None, inline_dict=None, copy=False):\n \"\"\"\n config_dict\n Configuration dict as is common for frameworks such as flask and django.\n Keys match the _ConfigValue.dict_key (usually all caps)\n env_dict\n Environment variables dict. Keys match the _ConfigValue.env_key\n (usually \"ELASTIC_APM_\" + dict_key)\n inline_dict\n Any config passed in as kwargs to the Client object. 
Typically\n the keys match the names of the _ConfigValue variables in the Config\n object.\n copy\n Whether this object is being created to copy an existing Config\n object. If True, don't run the initial `update` (which would call\n callbacks if present)\n \"\"\"\n self._values = {}\n self._errors = {}\n self._dict_key_lookup = {}\n self.callbacks_queue = []\n for config_value in self.__class__.__dict__.values():\n if not isinstance(config_value, _ConfigValue):\n continue\n self._dict_key_lookup[config_value.dict_key] = config_value\n if not copy:\n self.update(config_dict, env_dict, inline_dict, initial=True)\n\n def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):\n if config_dict is None:\n config_dict = {}\n if env_dict is None:\n env_dict = os.environ\n if inline_dict is None:\n inline_dict = {}\n for field, config_value in compat.iteritems(self.__class__.__dict__):\n if not isinstance(config_value, _ConfigValue):\n continue\n new_value = self._NO_VALUE\n # first check environment\n if config_value.env_key and config_value.env_key in env_dict:\n new_value = env_dict[config_value.env_key]\n # check the inline config\n elif field in inline_dict:\n new_value = inline_dict[field]\n # finally, check config dictionary\n elif config_value.dict_key in config_dict:\n new_value = config_dict[config_value.dict_key]\n # only set if new_value changed. We'll fall back to the field default if not.\n if new_value is not self._NO_VALUE:\n try:\n setattr(self, field, new_value)\n except ConfigurationError as e:\n self._errors[e.field_name] = str(e)\n # handle initial callbacks\n if (\n initial\n and config_value.callbacks_on_default\n and getattr(self, field) is not None\n and getattr(self, field) == config_value.default\n ):\n self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))\n # if a field has not been provided by any config source, we have to check separately if it is required\n if config_value.required and getattr(self, field) is None:\n self._errors[config_value.dict_key] = \"Configuration error: value for {} is required.\".format(\n config_value.dict_key\n )\n self.call_pending_callbacks()\n\n def call_pending_callbacks(self):\n \"\"\"\n Call callbacks for config options matching list of tuples:\n\n (dict_key, old_value, new_value)\n \"\"\"\n for dict_key, old_value, new_value in self.callbacks_queue:\n self._dict_key_lookup[dict_key].call_callbacks(old_value, new_value, self)\n self.callbacks_queue = []\n\n @property\n def values(self):\n return self._values\n\n @values.setter\n def values(self, values):\n self._values = values\n\n @property\n def errors(self):\n return self._errors\n\n def copy(self):\n c = self.__class__(copy=True)\n c._errors = {}\n c.values = self.values.copy()\n return c\n\n\nclass Config(_ConfigBase):\n service_name = _ConfigValue(\n \"SERVICE_NAME\", validators=[RegexValidator(\"^[a-zA-Z0-9 _-]+$\")], default=\"python_service\", required=True\n )\n service_node_name = _ConfigValue(\"SERVICE_NODE_NAME\")\n environment = _ConfigValue(\"ENVIRONMENT\")\n secret_token = _ConfigValue(\"SECRET_TOKEN\")\n api_key = _ConfigValue(\"API_KEY\")\n debug = _BoolConfigValue(\"DEBUG\", default=False)\n server_url = _ConfigValue(\"SERVER_URL\", default=\"http://localhost:8200\", required=True)\n server_cert = _ConfigValue(\"SERVER_CERT\", validators=[FileIsReadableValidator()])\n verify_server_cert = _BoolConfigValue(\"VERIFY_SERVER_CERT\", default=True)\n use_certifi = _BoolConfigValue(\"USE_CERTIFI\", 
default=True)\n include_paths = _ListConfigValue(\"INCLUDE_PATHS\")\n exclude_paths = _ListConfigValue(\"EXCLUDE_PATHS\", default=compat.get_default_library_patters())\n filter_exception_types = _ListConfigValue(\"FILTER_EXCEPTION_TYPES\")\n server_timeout = _ConfigValue(\n \"SERVER_TIMEOUT\",\n type=float,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 0.001, \"s\": 1, \"m\": 60, None: 1000})\n ],\n default=5,\n )\n hostname = _ConfigValue(\"HOSTNAME\", default=socket.gethostname())\n auto_log_stacks = _BoolConfigValue(\"AUTO_LOG_STACKS\", default=True)\n transport_class = _ConfigValue(\"TRANSPORT_CLASS\", default=\"elasticapm.transport.http.Transport\", required=True)\n processors = _ListConfigValue(\n \"PROCESSORS\",\n default=[\n \"elasticapm.processors.sanitize_stacktrace_locals\",\n \"elasticapm.processors.sanitize_http_request_cookies\",\n \"elasticapm.processors.sanitize_http_response_cookies\",\n \"elasticapm.processors.sanitize_http_headers\",\n \"elasticapm.processors.sanitize_http_wsgi_env\",\n \"elasticapm.processors.sanitize_http_request_body\",\n ],\n )\n sanitize_field_names = _ListConfigValue(\n \"SANITIZE_FIELD_NAMES\", type=starmatch_to_regex, default=BASE_SANITIZE_FIELD_NAMES\n )\n metrics_sets = _ListConfigValue(\n \"METRICS_SETS\",\n default=[\n \"elasticapm.metrics.sets.cpu.CPUMetricSet\",\n ],\n )\n metrics_interval = _ConfigValue(\n \"METRICS_INTERVAL\",\n type=int,\n validators=[duration_validator, ExcludeRangeValidator(1, 999, \"{range_start} - {range_end} ms\")],\n default=30000,\n )\n breakdown_metrics = _BoolConfigValue(\"BREAKDOWN_METRICS\", default=True)\n prometheus_metrics = _BoolConfigValue(\"PROMETHEUS_METRICS\", default=False)\n prometheus_metrics_prefix = _ConfigValue(\"PROMETHEUS_METRICS_PREFIX\", default=\"prometheus.metrics.\")\n disable_metrics = _ListConfigValue(\"DISABLE_METRICS\", type=starmatch_to_regex, default=[])\n central_config = _BoolConfigValue(\"CENTRAL_CONFIG\", default=True)\n api_request_size = _ConfigValue(\"API_REQUEST_SIZE\", type=int, validators=[size_validator], default=768 * 1024)\n api_request_time = _ConfigValue(\"API_REQUEST_TIME\", type=int, validators=[duration_validator], default=10 * 1000)\n transaction_sample_rate = _ConfigValue(\n \"TRANSACTION_SAMPLE_RATE\", type=float, validators=[PrecisionValidator(4, 0.0001)], default=1.0\n )\n transaction_max_spans = _ConfigValue(\"TRANSACTION_MAX_SPANS\", type=int, default=500)\n stack_trace_limit = _ConfigValue(\"STACK_TRACE_LIMIT\", type=int, default=500)\n span_frames_min_duration = _ConfigValue(\n \"SPAN_FRAMES_MIN_DURATION\",\n default=5,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000, None: 1})\n ],\n type=int,\n )\n span_compression_exact_match_max_duration = _ConfigValue(\n \"span_compression_exact_match_max_duration\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n span_compression_same_kind_max_duration = _ConfigValue(\n \"span_compression_exact_match_max_duration\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n collect_local_variables = _ConfigValue(\"COLLECT_LOCAL_VARIABLES\", default=\"errors\")\n source_lines_error_app_frames = _ConfigValue(\"SOURCE_LINES_ERROR_APP_FRAMES\", type=int, default=5)\n source_lines_error_library_frames = _ConfigValue(\"SOURCE_LINES_ERROR_LIBRARY_FRAMES\", type=int, default=5)\n source_lines_span_app_frames = _ConfigValue(\"SOURCE_LINES_SPAN_APP_FRAMES\", type=int, default=0)\n 
source_lines_span_library_frames = _ConfigValue(\"SOURCE_LINES_SPAN_LIBRARY_FRAMES\", type=int, default=0)\n local_var_max_length = _ConfigValue(\"LOCAL_VAR_MAX_LENGTH\", type=int, default=200)\n local_var_list_max_length = _ConfigValue(\"LOCAL_VAR_LIST_MAX_LENGTH\", type=int, default=10)\n local_var_dict_max_length = _ConfigValue(\"LOCAL_VAR_DICT_MAX_LENGTH\", type=int, default=10)\n capture_body = _ConfigValue(\n \"CAPTURE_BODY\",\n default=\"off\",\n validators=[lambda val, _: {\"errors\": \"error\", \"transactions\": \"transaction\"}.get(val, val)],\n )\n async_mode = _BoolConfigValue(\"ASYNC_MODE\", default=True)\n instrument_django_middleware = _BoolConfigValue(\"INSTRUMENT_DJANGO_MIDDLEWARE\", default=True)\n autoinsert_django_middleware = _BoolConfigValue(\"AUTOINSERT_DJANGO_MIDDLEWARE\", default=True)\n transactions_ignore_patterns = _ListConfigValue(\"TRANSACTIONS_IGNORE_PATTERNS\", default=[])\n transaction_ignore_urls = _ListConfigValue(\"TRANSACTION_IGNORE_URLS\", type=starmatch_to_regex, default=[])\n service_version = _ConfigValue(\"SERVICE_VERSION\")\n framework_name = _ConfigValue(\"FRAMEWORK_NAME\")\n framework_version = _ConfigValue(\"FRAMEWORK_VERSION\")\n global_labels = _DictConfigValue(\"GLOBAL_LABELS\")\n disable_send = _BoolConfigValue(\"DISABLE_SEND\", default=False)\n enabled = _BoolConfigValue(\"ENABLED\", default=True)\n recording = _BoolConfigValue(\"RECORDING\", default=True)\n instrument = _BoolConfigValue(\"INSTRUMENT\", default=True)\n enable_distributed_tracing = _BoolConfigValue(\"ENABLE_DISTRIBUTED_TRACING\", default=True)\n capture_headers = _BoolConfigValue(\"CAPTURE_HEADERS\", default=True)\n django_transaction_name_from_route = _BoolConfigValue(\"DJANGO_TRANSACTION_NAME_FROM_ROUTE\", default=False)\n disable_log_record_factory = _BoolConfigValue(\"DISABLE_LOG_RECORD_FACTORY\", default=False)\n use_elastic_traceparent_header = _BoolConfigValue(\"USE_ELASTIC_TRACEPARENT_HEADER\", default=True)\n use_elastic_excepthook = _BoolConfigValue(\"USE_ELASTIC_EXCEPTHOOK\", default=False)\n cloud_provider = _ConfigValue(\"CLOUD_PROVIDER\", default=True)\n log_level = _ConfigValue(\n \"LOG_LEVEL\",\n validators=[EnumerationValidator([\"trace\", \"debug\", \"info\", \"warning\", \"warn\", \"error\", \"critical\", \"off\"])],\n callbacks=[_log_level_callback],\n )\n log_file = _ConfigValue(\"LOG_FILE\", default=\"\")\n log_file_size = _ConfigValue(\"LOG_FILE_SIZE\", validators=[size_validator], type=int, default=50 * 1024 * 1024)\n log_ecs_reformatting = _ConfigValue(\n \"LOG_ECS_REFORMATTING\",\n validators=[EnumerationValidator([\"off\", \"override\"])],\n callbacks=[_log_ecs_reformatting_callback],\n default=\"off\",\n )\n\n @property\n def is_recording(self):\n if not self.enabled:\n return False\n else:\n return self.recording\n\n\nclass VersionedConfig(ThreadManager):\n \"\"\"\n A thin layer around Config that provides versioning\n \"\"\"\n\n __slots__ = (\n \"_config\",\n \"_version\",\n \"_first_config\",\n \"_first_version\",\n \"_lock\",\n \"transport\",\n \"_update_thread\",\n \"pid\",\n \"start_stop_order\",\n )\n\n def __init__(self, config_object, version, transport=None):\n \"\"\"\n Create a new VersionedConfig with an initial Config object\n :param config_object: the initial Config object\n :param version: a version identifier for the configuration\n \"\"\"\n self._config = self._first_config = config_object\n self._version = self._first_version = version\n self.transport = transport\n self._lock = threading.Lock()\n self._update_thread = None\n 
super(VersionedConfig, self).__init__()\n\n def update(self, version, **config):\n \"\"\"\n Update the configuration version\n :param version: version identifier for the new configuration\n :param config: a key/value map of new configuration\n :return: configuration errors, if any\n \"\"\"\n new_config = self._config.copy()\n\n # pass an empty env dict to ensure the environment doesn't get precedence\n new_config.update(inline_dict=config, env_dict={})\n if not new_config.errors:\n with self._lock:\n self._version = version\n self._config = new_config\n else:\n return new_config.errors\n\n def reset(self):\n \"\"\"\n Reset state to the original configuration\n\n Note that because ConfigurationValues can have callbacks, we need to\n note any differences between the original configuration and the most\n recent configuration and run any callbacks that might exist for those\n values.\n \"\"\"\n callbacks = []\n for key in compat.iterkeys(self._config.values):\n if key in self._first_config.values and self._config.values[key] != self._first_config.values[key]:\n callbacks.append((key, self._config.values[key], self._first_config.values[key]))\n\n with self._lock:\n self._version = self._first_version\n self._config = self._first_config\n\n self._config.callbacks_queue.extend(callbacks)\n self._config.call_pending_callbacks()\n\n @property\n def changed(self):\n return self._config != self._first_config\n\n def __getattr__(self, item):\n return getattr(self._config, item)\n\n def __setattr__(self, name, value):\n if name not in self.__slots__:\n setattr(self._config, name, value)\n else:\n super(VersionedConfig, self).__setattr__(name, value)\n\n @property\n def config_version(self):\n return self._version\n\n def update_config(self):\n if not self.transport:\n logger.warning(\"No transport set for config updates, skipping\")\n return\n logger.debug(\"Checking for new config...\")\n keys = {\"service\": {\"name\": self.service_name}}\n if self.environment:\n keys[\"service\"][\"environment\"] = self.environment\n new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)\n if new_version and new_config:\n errors = self.update(new_version, **new_config)\n if errors:\n logger.error(\"Error applying new configuration: %s\", repr(errors))\n else:\n logger.info(\n \"Applied new remote configuration: %s\",\n \"; \".join(\n \"%s=%s\" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)\n ),\n )\n elif new_version == self.config_version:\n logger.debug(\"Remote config unchanged\")\n elif not new_config and self.changed:\n logger.debug(\"Remote config disappeared, resetting to original\")\n self.reset()\n\n return next_run\n\n def start_thread(self, pid=None):\n self._update_thread = IntervalTimer(\n self.update_config, 1, \"eapm conf updater\", daemon=True, evaluate_function_interval=True\n )\n self._update_thread.start()\n super(VersionedConfig, self).start_thread(pid=pid)\n\n def stop_thread(self):\n if self._update_thread:\n self._update_thread.cancel()\n self._update_thread = None\n\n\ndef setup_logging(handler):\n \"\"\"\n Configures logging to pipe to Elastic APM.\n\n For a typical Python install:\n\n >>> from elasticapm.handlers.logging import LoggingHandler\n >>> client = ElasticAPM(...)\n >>> setup_logging(LoggingHandler(client))\n\n Within Django:\n\n >>> from elasticapm.contrib.django.handlers import LoggingHandler\n >>> setup_logging(LoggingHandler())\n\n Returns a boolean based on if logging was configured or not.\n \"\"\"\n # 
TODO We should probably revisit this. Does it make more sense as\n # a method within the Client class? The Client object could easily\n # pass itself into LoggingHandler and we could eliminate args altogether.\n logger = logging.getLogger()\n if handler.__class__ in map(type, logger.handlers):\n return False\n\n logger.addHandler(handler)\n\n return True\n", "path": "elasticapm/conf/__init__.py" } ]
diff --git a/elasticapm/conf/__init__.py b/elasticapm/conf/__init__.py index 25f13b4ec..3c7c97326 100644 --- a/elasticapm/conf/__init__.py +++ b/elasticapm/conf/__init__.py @@ -550,7 +550,6 @@ class Config(_ConfigBase): "METRICS_SETS", default=[ "elasticapm.metrics.sets.cpu.CPUMetricSet", - "elasticapm.metrics.sets.transactions.TransactionsMetricSet", ], ) metrics_interval = _ConfigValue(
rasterio__rasterio-778
Copy colormap when rasters are merged I'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap? I have an initial pass of this change at: https://github.com/kapadia/rasterio/tree/rio-merge-colormap
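A minimal sketch of one way to do this, mirroring the change applied later in this record: after the mosaic is written to the output dataset, copy the band-1 colormap of the first input, treating rasterio's `ValueError` (raised when a band has no colormap) as nothing to copy. Here `sources` and `dst` are the open input datasets and the output dataset inside the merge command; the snippet is illustrative, not the exact patch.

```python
# Inside the `with rasterio.open(output, 'w', **profile) as dst:` block,
# right after `dst.write(dest)`:
try:
    colormap = sources[0].colormap(1)  # band-1 colormap of the first input
    dst.write_colormap(1, colormap)    # carry it over to band 1 of the mosaic
except ValueError:
    # the first input has no colormap; nothing to preserve
    pass
```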
[ { "content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n", "path": "rasterio/rio/merge.py" } ]
[ { "content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n\n # uses the colormap in the first input raster.\n try:\n colormap = sources[0].colormap(1)\n dst.write_colormap(1, colormap)\n except ValueError:\n pass\n", "path": "rasterio/rio/merge.py" } ]
diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py index d583bcbf1..d44c67138 100644 --- a/rasterio/rio/merge.py +++ b/rasterio/rio/merge.py @@ -67,3 +67,10 @@ def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite, with rasterio.open(output, 'w', **profile) as dst: dst.write(dest) + + # uses the colormap in the first input raster. + try: + colormap = sources[0].colormap(1) + dst.write_colormap(1, colormap) + except ValueError: + pass diff --git a/tests/test_rio_merge.py b/tests/test_rio_merge.py index b2ce1da01..d643bb9b6 100644 --- a/tests/test_rio_merge.py +++ b/tests/test_rio_merge.py @@ -67,6 +67,26 @@ def test_data_dir_2(tmpdir): return tmpdir +def test_merge_with_colormap(test_data_dir_1): + outputname = str(test_data_dir_1.join('merged.tif')) + inputs = [str(x) for x in test_data_dir_1.listdir()] + inputs.sort() + + # Add a colormap to the first input prior merge + with rasterio.open(inputs[0], 'r+') as src: + src.write_colormap(1, {0: (255, 0, 0, 255), 255: (0, 0, 0, 0)}) + + runner = CliRunner() + result = runner.invoke(merge, inputs + [outputname]) + assert result.exit_code == 0 + assert os.path.exists(outputname) + + with rasterio.open(outputname) as out: + cmap = out.colormap(1) + assert cmap[0] == (255, 0, 0, 255) + assert cmap[255] == (0, 0, 0, 255) + + def test_merge_with_nodata(test_data_dir_1): outputname = str(test_data_dir_1.join('merged.tif')) inputs = [str(x) for x in test_data_dir_1.listdir()]
docker__docker-py-1576
Can't create/update a replicated service with replicas = 0 (bug found)

I try to update a service with the following code:

```
service.update(
    name=service.name,
    mode=docker.types.ServiceMode('replicated', replicas=0),
    networks=networks_list,
)
```

First, I only want to update the replicas, but I have to pass the name and networks as well or the API assumes I want to change them and returns an error.

Second, the `ServiceMode` object created here doesn't work: it serializes as `{'replicated': {}}`.

The reason is https://github.com/docker/docker-py/blob/master/docker/types/services.py#L410, where the truthiness check on `replicas` should be an explicit existence check, so that `replicas=0` is not silently dropped.

I would love to do a pull request, but I'm a newbie and I'm already lost with my first one: #1568

Thanks for everything
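A minimal standalone sketch of the pitfall being described (the helper names `build_mode_buggy` and `build_mode_fixed` are illustrative, not part of docker-py): a truthiness check on `replicas` drops the value 0, while an `is not None` check keeps it, which is exactly the one-line change in the diff below.

```python
def build_mode_buggy(mode, replicas=None):
    spec = {mode: {}}
    if replicas:                    # 0 is falsy, so the key is silently skipped
        spec[mode]['Replicas'] = replicas
    return spec

def build_mode_fixed(mode, replicas=None):
    spec = {mode: {}}
    if replicas is not None:        # only skip when replicas was not supplied
        spec[mode]['Replicas'] = replicas
    return spec

assert build_mode_buggy('replicated', 0) == {'replicated': {}}              # the reported bug
assert build_mode_fixed('replicated', 0) == {'replicated': {'Replicas': 0}}
```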
[ { "content": "import six\n\nfrom .. import errors\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom ..utils import check_resource, format_environment, split_command\n\n\nclass TaskTemplate(dict):\n \"\"\"\n Describe the task specification to be used when creating or updating a\n service.\n\n Args:\n\n container_spec (ContainerSpec): Container settings for containers\n started as part of this task.\n log_driver (DriverConfig): Log configuration for containers created as\n part of the service.\n resources (Resources): Resource requirements which apply to each\n individual container created as part of the service.\n restart_policy (RestartPolicy): Specification for the restart policy\n which applies to containers created as part of this service.\n placement (:py:class:`list`): A list of constraints.\n force_update (int): A counter that triggers an update even if no\n relevant parameters have been changed.\n \"\"\"\n def __init__(self, container_spec, resources=None, restart_policy=None,\n placement=None, log_driver=None, force_update=None):\n self['ContainerSpec'] = container_spec\n if resources:\n self['Resources'] = resources\n if restart_policy:\n self['RestartPolicy'] = restart_policy\n if placement:\n if isinstance(placement, list):\n placement = {'Constraints': placement}\n self['Placement'] = placement\n if log_driver:\n self['LogDriver'] = log_driver\n\n if force_update is not None:\n if not isinstance(force_update, int):\n raise TypeError('force_update must be an integer')\n self['ForceUpdate'] = force_update\n\n @property\n def container_spec(self):\n return self.get('ContainerSpec')\n\n @property\n def resources(self):\n return self.get('Resources')\n\n @property\n def restart_policy(self):\n return self.get('RestartPolicy')\n\n @property\n def placement(self):\n return self.get('Placement')\n\n\nclass ContainerSpec(dict):\n \"\"\"\n Describes the behavior of containers that are part of a task, and is used\n when declaring a :py:class:`~docker.types.TaskTemplate`.\n\n Args:\n\n image (string): The image name to use for the container.\n command (string or list): The command to be run in the image.\n args (:py:class:`list`): Arguments to the command.\n hostname (string): The hostname to set on the container.\n env (dict): Environment variables.\n dir (string): The working directory for commands to run in.\n user (string): The user inside the container.\n labels (dict): A map of labels to associate with the service.\n mounts (:py:class:`list`): A list of specifications for mounts to be\n added to containers created as part of the service. 
See the\n :py:class:`~docker.types.Mount` class for details.\n stop_grace_period (int): Amount of time to wait for the container to\n terminate before forcefully killing it.\n secrets (list of py:class:`SecretReference`): List of secrets to be\n made available inside the containers.\n \"\"\"\n def __init__(self, image, command=None, args=None, hostname=None, env=None,\n workdir=None, user=None, labels=None, mounts=None,\n stop_grace_period=None, secrets=None):\n self['Image'] = image\n\n if isinstance(command, six.string_types):\n command = split_command(command)\n self['Command'] = command\n self['Args'] = args\n\n if hostname is not None:\n self['Hostname'] = hostname\n if env is not None:\n if isinstance(env, dict):\n self['Env'] = format_environment(env)\n else:\n self['Env'] = env\n if workdir is not None:\n self['Dir'] = workdir\n if user is not None:\n self['User'] = user\n if labels is not None:\n self['Labels'] = labels\n if mounts is not None:\n for mount in mounts:\n if isinstance(mount, six.string_types):\n mounts.append(Mount.parse_mount_string(mount))\n mounts.remove(mount)\n self['Mounts'] = mounts\n if stop_grace_period is not None:\n self['StopGracePeriod'] = stop_grace_period\n\n if secrets is not None:\n if not isinstance(secrets, list):\n raise TypeError('secrets must be a list')\n self['Secrets'] = secrets\n\n\nclass Mount(dict):\n \"\"\"\n Describes a mounted folder's configuration inside a container. A list of\n :py:class:`Mount`s would be used as part of a\n :py:class:`~docker.types.ContainerSpec`.\n\n Args:\n\n target (string): Container path.\n source (string): Mount source (e.g. a volume name or a host path).\n type (string): The mount type (``bind`` or ``volume``).\n Default: ``volume``.\n read_only (bool): Whether the mount should be read-only.\n propagation (string): A propagation mode with the value ``[r]private``,\n ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.\n no_copy (bool): False if the volume should be populated with the data\n from the target. Default: ``False``. Only valid for the ``volume``\n type.\n labels (dict): User-defined name and labels for the volume. Only valid\n for the ``volume`` type.\n driver_config (DriverConfig): Volume driver configuration. 
Only valid\n for the ``volume`` type.\n \"\"\"\n def __init__(self, target, source, type='volume', read_only=False,\n propagation=None, no_copy=False, labels=None,\n driver_config=None):\n self['Target'] = target\n self['Source'] = source\n if type not in ('bind', 'volume'):\n raise errors.InvalidArgument(\n 'Only acceptable mount types are `bind` and `volume`.'\n )\n self['Type'] = type\n self['ReadOnly'] = read_only\n\n if type == 'bind':\n if propagation is not None:\n self['BindOptions'] = {\n 'Propagation': propagation\n }\n if any([labels, driver_config, no_copy]):\n raise errors.InvalidArgument(\n 'Mount type is binding but volume options have been '\n 'provided.'\n )\n else:\n volume_opts = {}\n if no_copy:\n volume_opts['NoCopy'] = True\n if labels:\n volume_opts['Labels'] = labels\n if driver_config:\n volume_opts['DriverConfig'] = driver_config\n if volume_opts:\n self['VolumeOptions'] = volume_opts\n if propagation:\n raise errors.InvalidArgument(\n 'Mount type is volume but `propagation` argument has been '\n 'provided.'\n )\n\n @classmethod\n def parse_mount_string(cls, string):\n parts = string.split(':')\n if len(parts) > 3:\n raise errors.InvalidArgument(\n 'Invalid mount format \"{0}\"'.format(string)\n )\n if len(parts) == 1:\n return cls(target=parts[0], source=None)\n else:\n target = parts[1]\n source = parts[0]\n mount_type = 'volume'\n if source.startswith('/') or (\n IS_WINDOWS_PLATFORM and source[0].isalpha() and\n source[1] == ':'\n ):\n # FIXME: That windows condition will fail earlier since we\n # split on ':'. We should look into doing a smarter split\n # if we detect we are on Windows.\n mount_type = 'bind'\n read_only = not (len(parts) == 2 or parts[2] == 'rw')\n return cls(target, source, read_only=read_only, type=mount_type)\n\n\nclass Resources(dict):\n \"\"\"\n Configures resource allocation for containers when made part of a\n :py:class:`~docker.types.ContainerSpec`.\n\n Args:\n\n cpu_limit (int): CPU limit in units of 10^9 CPU shares.\n mem_limit (int): Memory limit in Bytes.\n cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.\n mem_reservation (int): Memory reservation in Bytes.\n \"\"\"\n def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,\n mem_reservation=None):\n limits = {}\n reservation = {}\n if cpu_limit is not None:\n limits['NanoCPUs'] = cpu_limit\n if mem_limit is not None:\n limits['MemoryBytes'] = mem_limit\n if cpu_reservation is not None:\n reservation['NanoCPUs'] = cpu_reservation\n if mem_reservation is not None:\n reservation['MemoryBytes'] = mem_reservation\n\n if limits:\n self['Limits'] = limits\n if reservation:\n self['Reservations'] = reservation\n\n\nclass UpdateConfig(dict):\n \"\"\"\n\n Used to specify the way container updates should be performed by a service.\n\n Args:\n\n parallelism (int): Maximum number of tasks to be updated in one\n iteration (0 means unlimited parallelism). Default: 0.\n delay (int): Amount of time between updates.\n failure_action (string): Action to take if an updated task fails to\n run, or stops running during the update. Acceptable values are\n ``continue`` and ``pause``. Default: ``continue``\n monitor (int): Amount of time to monitor each updated task for\n failures, in nanoseconds.\n max_failure_ratio (float): The fraction of tasks that may fail during\n an update before the failure action is invoked, specified as a\n floating point number between 0 and 1. 
Default: 0\n \"\"\"\n def __init__(self, parallelism=0, delay=None, failure_action='continue',\n monitor=None, max_failure_ratio=None):\n self['Parallelism'] = parallelism\n if delay is not None:\n self['Delay'] = delay\n if failure_action not in ('pause', 'continue'):\n raise errors.InvalidArgument(\n 'failure_action must be either `pause` or `continue`.'\n )\n self['FailureAction'] = failure_action\n\n if monitor is not None:\n if not isinstance(monitor, int):\n raise TypeError('monitor must be an integer')\n self['Monitor'] = monitor\n\n if max_failure_ratio is not None:\n if not isinstance(max_failure_ratio, (float, int)):\n raise TypeError('max_failure_ratio must be a float')\n if max_failure_ratio > 1 or max_failure_ratio < 0:\n raise errors.InvalidArgument(\n 'max_failure_ratio must be a number between 0 and 1'\n )\n self['MaxFailureRatio'] = max_failure_ratio\n\n\nclass RestartConditionTypesEnum(object):\n _values = (\n 'none',\n 'on-failure',\n 'any',\n )\n NONE, ON_FAILURE, ANY = _values\n\n\nclass RestartPolicy(dict):\n \"\"\"\n Used when creating a :py:class:`~docker.types.ContainerSpec`,\n dictates whether a container should restart after stopping or failing.\n\n Args:\n\n condition (string): Condition for restart (``none``, ``on-failure``,\n or ``any``). Default: `none`.\n delay (int): Delay between restart attempts. Default: 0\n attempts (int): Maximum attempts to restart a given container before\n giving up. Default value is 0, which is ignored.\n window (int): Time window used to evaluate the restart policy. Default\n value is 0, which is unbounded.\n \"\"\"\n\n condition_types = RestartConditionTypesEnum\n\n def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,\n max_attempts=0, window=0):\n if condition not in self.condition_types._values:\n raise TypeError(\n 'Invalid RestartPolicy condition {0}'.format(condition)\n )\n\n self['Condition'] = condition\n self['Delay'] = delay\n self['MaxAttempts'] = max_attempts\n self['Window'] = window\n\n\nclass DriverConfig(dict):\n \"\"\"\n Indicates which driver to use, as well as its configuration. Can be used\n as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,\n and for the `driver_config` in a volume\n :py:class:`~docker.types.Mount`.\n\n Args:\n\n name (string): Name of the driver to use.\n options (dict): Driver-specific options. Default: ``None``.\n \"\"\"\n def __init__(self, name, options=None):\n self['Name'] = name\n if options:\n self['Options'] = options\n\n\nclass EndpointSpec(dict):\n \"\"\"\n Describes properties to access and load-balance a service.\n\n Args:\n\n mode (string): The mode of resolution to use for internal load\n balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to\n ``'vip'`` if not provided.\n ports (dict): Exposed ports that this service is accessible on from the\n outside, in the form of ``{ target_port: published_port }`` or\n ``{ target_port: (published_port, protocol) }``. 
Ports can only be\n provided if the ``vip`` resolution mode is used.\n \"\"\"\n def __init__(self, mode=None, ports=None):\n if ports:\n self['Ports'] = convert_service_ports(ports)\n if mode:\n self['Mode'] = mode\n\n\ndef convert_service_ports(ports):\n if isinstance(ports, list):\n return ports\n if not isinstance(ports, dict):\n raise TypeError(\n 'Invalid type for ports, expected dict or list'\n )\n\n result = []\n for k, v in six.iteritems(ports):\n port_spec = {\n 'Protocol': 'tcp',\n 'PublishedPort': k\n }\n\n if isinstance(v, tuple):\n port_spec['TargetPort'] = v[0]\n if len(v) == 2:\n port_spec['Protocol'] = v[1]\n else:\n port_spec['TargetPort'] = v\n\n result.append(port_spec)\n return result\n\n\nclass ServiceMode(dict):\n \"\"\"\n Indicate whether a service should be deployed as a replicated or global\n service, and associated parameters\n\n Args:\n mode (string): Can be either ``replicated`` or ``global``\n replicas (int): Number of replicas. For replicated services only.\n \"\"\"\n def __init__(self, mode, replicas=None):\n if mode not in ('replicated', 'global'):\n raise errors.InvalidArgument(\n 'mode must be either \"replicated\" or \"global\"'\n )\n if mode != 'replicated' and replicas is not None:\n raise errors.InvalidArgument(\n 'replicas can only be used for replicated mode'\n )\n self[mode] = {}\n if replicas:\n self[mode]['Replicas'] = replicas\n\n @property\n def mode(self):\n if 'global' in self:\n return 'global'\n return 'replicated'\n\n @property\n def replicas(self):\n if self.mode != 'replicated':\n return None\n return self['replicated'].get('Replicas')\n\n\nclass SecretReference(dict):\n \"\"\"\n Secret reference to be used as part of a :py:class:`ContainerSpec`.\n Describes how a secret is made accessible inside the service's\n containers.\n\n Args:\n secret_id (string): Secret's ID\n secret_name (string): Secret's name as defined at its creation.\n filename (string): Name of the file containing the secret. Defaults\n to the secret's name if not specified.\n uid (string): UID of the secret file's owner. Default: 0\n gid (string): GID of the secret file's group. Default: 0\n mode (int): File access mode inside the container. Default: 0o444\n \"\"\"\n @check_resource\n def __init__(self, secret_id, secret_name, filename=None, uid=None,\n gid=None, mode=0o444):\n self['SecretName'] = secret_name\n self['SecretID'] = secret_id\n self['File'] = {\n 'Name': filename or secret_name,\n 'UID': uid or '0',\n 'GID': gid or '0',\n 'Mode': mode\n }\n", "path": "docker/types/services.py" } ]
[ { "content": "import six\n\nfrom .. import errors\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom ..utils import check_resource, format_environment, split_command\n\n\nclass TaskTemplate(dict):\n \"\"\"\n Describe the task specification to be used when creating or updating a\n service.\n\n Args:\n\n container_spec (ContainerSpec): Container settings for containers\n started as part of this task.\n log_driver (DriverConfig): Log configuration for containers created as\n part of the service.\n resources (Resources): Resource requirements which apply to each\n individual container created as part of the service.\n restart_policy (RestartPolicy): Specification for the restart policy\n which applies to containers created as part of this service.\n placement (:py:class:`list`): A list of constraints.\n force_update (int): A counter that triggers an update even if no\n relevant parameters have been changed.\n \"\"\"\n def __init__(self, container_spec, resources=None, restart_policy=None,\n placement=None, log_driver=None, force_update=None):\n self['ContainerSpec'] = container_spec\n if resources:\n self['Resources'] = resources\n if restart_policy:\n self['RestartPolicy'] = restart_policy\n if placement:\n if isinstance(placement, list):\n placement = {'Constraints': placement}\n self['Placement'] = placement\n if log_driver:\n self['LogDriver'] = log_driver\n\n if force_update is not None:\n if not isinstance(force_update, int):\n raise TypeError('force_update must be an integer')\n self['ForceUpdate'] = force_update\n\n @property\n def container_spec(self):\n return self.get('ContainerSpec')\n\n @property\n def resources(self):\n return self.get('Resources')\n\n @property\n def restart_policy(self):\n return self.get('RestartPolicy')\n\n @property\n def placement(self):\n return self.get('Placement')\n\n\nclass ContainerSpec(dict):\n \"\"\"\n Describes the behavior of containers that are part of a task, and is used\n when declaring a :py:class:`~docker.types.TaskTemplate`.\n\n Args:\n\n image (string): The image name to use for the container.\n command (string or list): The command to be run in the image.\n args (:py:class:`list`): Arguments to the command.\n hostname (string): The hostname to set on the container.\n env (dict): Environment variables.\n dir (string): The working directory for commands to run in.\n user (string): The user inside the container.\n labels (dict): A map of labels to associate with the service.\n mounts (:py:class:`list`): A list of specifications for mounts to be\n added to containers created as part of the service. 
See the\n :py:class:`~docker.types.Mount` class for details.\n stop_grace_period (int): Amount of time to wait for the container to\n terminate before forcefully killing it.\n secrets (list of py:class:`SecretReference`): List of secrets to be\n made available inside the containers.\n \"\"\"\n def __init__(self, image, command=None, args=None, hostname=None, env=None,\n workdir=None, user=None, labels=None, mounts=None,\n stop_grace_period=None, secrets=None):\n self['Image'] = image\n\n if isinstance(command, six.string_types):\n command = split_command(command)\n self['Command'] = command\n self['Args'] = args\n\n if hostname is not None:\n self['Hostname'] = hostname\n if env is not None:\n if isinstance(env, dict):\n self['Env'] = format_environment(env)\n else:\n self['Env'] = env\n if workdir is not None:\n self['Dir'] = workdir\n if user is not None:\n self['User'] = user\n if labels is not None:\n self['Labels'] = labels\n if mounts is not None:\n for mount in mounts:\n if isinstance(mount, six.string_types):\n mounts.append(Mount.parse_mount_string(mount))\n mounts.remove(mount)\n self['Mounts'] = mounts\n if stop_grace_period is not None:\n self['StopGracePeriod'] = stop_grace_period\n\n if secrets is not None:\n if not isinstance(secrets, list):\n raise TypeError('secrets must be a list')\n self['Secrets'] = secrets\n\n\nclass Mount(dict):\n \"\"\"\n Describes a mounted folder's configuration inside a container. A list of\n :py:class:`Mount`s would be used as part of a\n :py:class:`~docker.types.ContainerSpec`.\n\n Args:\n\n target (string): Container path.\n source (string): Mount source (e.g. a volume name or a host path).\n type (string): The mount type (``bind`` or ``volume``).\n Default: ``volume``.\n read_only (bool): Whether the mount should be read-only.\n propagation (string): A propagation mode with the value ``[r]private``,\n ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.\n no_copy (bool): False if the volume should be populated with the data\n from the target. Default: ``False``. Only valid for the ``volume``\n type.\n labels (dict): User-defined name and labels for the volume. Only valid\n for the ``volume`` type.\n driver_config (DriverConfig): Volume driver configuration. 
Only valid\n for the ``volume`` type.\n \"\"\"\n def __init__(self, target, source, type='volume', read_only=False,\n propagation=None, no_copy=False, labels=None,\n driver_config=None):\n self['Target'] = target\n self['Source'] = source\n if type not in ('bind', 'volume'):\n raise errors.InvalidArgument(\n 'Only acceptable mount types are `bind` and `volume`.'\n )\n self['Type'] = type\n self['ReadOnly'] = read_only\n\n if type == 'bind':\n if propagation is not None:\n self['BindOptions'] = {\n 'Propagation': propagation\n }\n if any([labels, driver_config, no_copy]):\n raise errors.InvalidArgument(\n 'Mount type is binding but volume options have been '\n 'provided.'\n )\n else:\n volume_opts = {}\n if no_copy:\n volume_opts['NoCopy'] = True\n if labels:\n volume_opts['Labels'] = labels\n if driver_config:\n volume_opts['DriverConfig'] = driver_config\n if volume_opts:\n self['VolumeOptions'] = volume_opts\n if propagation:\n raise errors.InvalidArgument(\n 'Mount type is volume but `propagation` argument has been '\n 'provided.'\n )\n\n @classmethod\n def parse_mount_string(cls, string):\n parts = string.split(':')\n if len(parts) > 3:\n raise errors.InvalidArgument(\n 'Invalid mount format \"{0}\"'.format(string)\n )\n if len(parts) == 1:\n return cls(target=parts[0], source=None)\n else:\n target = parts[1]\n source = parts[0]\n mount_type = 'volume'\n if source.startswith('/') or (\n IS_WINDOWS_PLATFORM and source[0].isalpha() and\n source[1] == ':'\n ):\n # FIXME: That windows condition will fail earlier since we\n # split on ':'. We should look into doing a smarter split\n # if we detect we are on Windows.\n mount_type = 'bind'\n read_only = not (len(parts) == 2 or parts[2] == 'rw')\n return cls(target, source, read_only=read_only, type=mount_type)\n\n\nclass Resources(dict):\n \"\"\"\n Configures resource allocation for containers when made part of a\n :py:class:`~docker.types.ContainerSpec`.\n\n Args:\n\n cpu_limit (int): CPU limit in units of 10^9 CPU shares.\n mem_limit (int): Memory limit in Bytes.\n cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.\n mem_reservation (int): Memory reservation in Bytes.\n \"\"\"\n def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,\n mem_reservation=None):\n limits = {}\n reservation = {}\n if cpu_limit is not None:\n limits['NanoCPUs'] = cpu_limit\n if mem_limit is not None:\n limits['MemoryBytes'] = mem_limit\n if cpu_reservation is not None:\n reservation['NanoCPUs'] = cpu_reservation\n if mem_reservation is not None:\n reservation['MemoryBytes'] = mem_reservation\n\n if limits:\n self['Limits'] = limits\n if reservation:\n self['Reservations'] = reservation\n\n\nclass UpdateConfig(dict):\n \"\"\"\n\n Used to specify the way container updates should be performed by a service.\n\n Args:\n\n parallelism (int): Maximum number of tasks to be updated in one\n iteration (0 means unlimited parallelism). Default: 0.\n delay (int): Amount of time between updates.\n failure_action (string): Action to take if an updated task fails to\n run, or stops running during the update. Acceptable values are\n ``continue`` and ``pause``. Default: ``continue``\n monitor (int): Amount of time to monitor each updated task for\n failures, in nanoseconds.\n max_failure_ratio (float): The fraction of tasks that may fail during\n an update before the failure action is invoked, specified as a\n floating point number between 0 and 1. 
Default: 0\n \"\"\"\n def __init__(self, parallelism=0, delay=None, failure_action='continue',\n monitor=None, max_failure_ratio=None):\n self['Parallelism'] = parallelism\n if delay is not None:\n self['Delay'] = delay\n if failure_action not in ('pause', 'continue'):\n raise errors.InvalidArgument(\n 'failure_action must be either `pause` or `continue`.'\n )\n self['FailureAction'] = failure_action\n\n if monitor is not None:\n if not isinstance(monitor, int):\n raise TypeError('monitor must be an integer')\n self['Monitor'] = monitor\n\n if max_failure_ratio is not None:\n if not isinstance(max_failure_ratio, (float, int)):\n raise TypeError('max_failure_ratio must be a float')\n if max_failure_ratio > 1 or max_failure_ratio < 0:\n raise errors.InvalidArgument(\n 'max_failure_ratio must be a number between 0 and 1'\n )\n self['MaxFailureRatio'] = max_failure_ratio\n\n\nclass RestartConditionTypesEnum(object):\n _values = (\n 'none',\n 'on-failure',\n 'any',\n )\n NONE, ON_FAILURE, ANY = _values\n\n\nclass RestartPolicy(dict):\n \"\"\"\n Used when creating a :py:class:`~docker.types.ContainerSpec`,\n dictates whether a container should restart after stopping or failing.\n\n Args:\n\n condition (string): Condition for restart (``none``, ``on-failure``,\n or ``any``). Default: `none`.\n delay (int): Delay between restart attempts. Default: 0\n attempts (int): Maximum attempts to restart a given container before\n giving up. Default value is 0, which is ignored.\n window (int): Time window used to evaluate the restart policy. Default\n value is 0, which is unbounded.\n \"\"\"\n\n condition_types = RestartConditionTypesEnum\n\n def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,\n max_attempts=0, window=0):\n if condition not in self.condition_types._values:\n raise TypeError(\n 'Invalid RestartPolicy condition {0}'.format(condition)\n )\n\n self['Condition'] = condition\n self['Delay'] = delay\n self['MaxAttempts'] = max_attempts\n self['Window'] = window\n\n\nclass DriverConfig(dict):\n \"\"\"\n Indicates which driver to use, as well as its configuration. Can be used\n as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,\n and for the `driver_config` in a volume\n :py:class:`~docker.types.Mount`.\n\n Args:\n\n name (string): Name of the driver to use.\n options (dict): Driver-specific options. Default: ``None``.\n \"\"\"\n def __init__(self, name, options=None):\n self['Name'] = name\n if options:\n self['Options'] = options\n\n\nclass EndpointSpec(dict):\n \"\"\"\n Describes properties to access and load-balance a service.\n\n Args:\n\n mode (string): The mode of resolution to use for internal load\n balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to\n ``'vip'`` if not provided.\n ports (dict): Exposed ports that this service is accessible on from the\n outside, in the form of ``{ target_port: published_port }`` or\n ``{ target_port: (published_port, protocol) }``. 
Ports can only be\n provided if the ``vip`` resolution mode is used.\n \"\"\"\n def __init__(self, mode=None, ports=None):\n if ports:\n self['Ports'] = convert_service_ports(ports)\n if mode:\n self['Mode'] = mode\n\n\ndef convert_service_ports(ports):\n if isinstance(ports, list):\n return ports\n if not isinstance(ports, dict):\n raise TypeError(\n 'Invalid type for ports, expected dict or list'\n )\n\n result = []\n for k, v in six.iteritems(ports):\n port_spec = {\n 'Protocol': 'tcp',\n 'PublishedPort': k\n }\n\n if isinstance(v, tuple):\n port_spec['TargetPort'] = v[0]\n if len(v) == 2:\n port_spec['Protocol'] = v[1]\n else:\n port_spec['TargetPort'] = v\n\n result.append(port_spec)\n return result\n\n\nclass ServiceMode(dict):\n \"\"\"\n Indicate whether a service should be deployed as a replicated or global\n service, and associated parameters\n\n Args:\n mode (string): Can be either ``replicated`` or ``global``\n replicas (int): Number of replicas. For replicated services only.\n \"\"\"\n def __init__(self, mode, replicas=None):\n if mode not in ('replicated', 'global'):\n raise errors.InvalidArgument(\n 'mode must be either \"replicated\" or \"global\"'\n )\n if mode != 'replicated' and replicas is not None:\n raise errors.InvalidArgument(\n 'replicas can only be used for replicated mode'\n )\n self[mode] = {}\n if replicas is not None:\n self[mode]['Replicas'] = replicas\n\n @property\n def mode(self):\n if 'global' in self:\n return 'global'\n return 'replicated'\n\n @property\n def replicas(self):\n if self.mode != 'replicated':\n return None\n return self['replicated'].get('Replicas')\n\n\nclass SecretReference(dict):\n \"\"\"\n Secret reference to be used as part of a :py:class:`ContainerSpec`.\n Describes how a secret is made accessible inside the service's\n containers.\n\n Args:\n secret_id (string): Secret's ID\n secret_name (string): Secret's name as defined at its creation.\n filename (string): Name of the file containing the secret. Defaults\n to the secret's name if not specified.\n uid (string): UID of the secret file's owner. Default: 0\n gid (string): GID of the secret file's group. Default: 0\n mode (int): File access mode inside the container. Default: 0o444\n \"\"\"\n @check_resource\n def __init__(self, secret_id, secret_name, filename=None, uid=None,\n gid=None, mode=0o444):\n self['SecretName'] = secret_name\n self['SecretID'] = secret_id\n self['File'] = {\n 'Name': filename or secret_name,\n 'UID': uid or '0',\n 'GID': gid or '0',\n 'Mode': mode\n }\n", "path": "docker/types/services.py" } ]
diff --git a/docker/types/services.py b/docker/types/services.py index 9291c9bd4..e7787ec81 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -407,7 +407,7 @@ def __init__(self, mode, replicas=None): 'replicas can only be used for replicated mode' ) self[mode] = {} - if replicas: + if replicas is not None: self[mode]['Replicas'] = replicas @property diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py index cb1d90ca2..160fabdd7 100644 --- a/tests/unit/dockertypes_test.py +++ b/tests/unit/dockertypes_test.py @@ -305,6 +305,12 @@ def test_replicated_replicas(self): assert mode.mode == 'replicated' assert mode.replicas == 21 + def test_replicated_replicas_0(self): + mode = ServiceMode('replicated', 0) + assert mode == {'replicated': {'Replicas': 0}} + assert mode.mode == 'replicated' + assert mode.replicas == 0 + def test_invalid_mode(self): with pytest.raises(InvalidArgument): ServiceMode('foobar')
cisagov__manage.get.gov-1452
DISCOVERY: Notification and change log for domain managers

### Issue description

As a domain manager, I want an in-app log of all changes made to my domain
so that I can ensure the information is correct and track any changes that have been made, avoiding and correcting errors.

### Acceptance criteria

TBD

### Additional context

Notifications about changes to domain info: All users wanted to be notified of changes to their domain information, in particular updates to name servers. Most users said they would like email notifications because they rarely visit the registrar. However, an in-app audit trail would be helpful as well, for future reference or in case an email was missed. Need to do some discovery and design exploration around this.

Source: [User feedback](https://docs.google.com/document/d/1M5foXX34qPc7R_J1uhBACHWUhg8WHwX3bB6nurvNNWE/edit#bookmark=id.pa0k2x54vkx1)

### Links to other issues

_No response_
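For background on the mechanism this record ends up touching (a sketch grounded in the model registration file shown below, which already uses django-auditlog): registering a model with the auditlog registry is what makes its creates, updates, and deletes available as log entries that an in-app change log could surface.

```python
# Sketch of the registration pattern used in src/registrar/models/__init__.py
from auditlog.registry import auditlog  # django-auditlog

from .domain_information import DomainInformation

# Once registered, every save/delete on DomainInformation produces a
# LogEntry row that an in-app "change log" view (or email digest) can read.
auditlog.register(DomainInformation)
```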
[ { "content": "from auditlog.registry import auditlog # type: ignore\nfrom .contact import Contact\nfrom .domain_application import DomainApplication\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\nfrom .draft_domain import DraftDomain\nfrom .host_ip import HostIP\nfrom .host import Host\nfrom .domain_invitation import DomainInvitation\nfrom .nameserver import Nameserver\nfrom .user_domain_role import UserDomainRole\nfrom .public_contact import PublicContact\nfrom .user import User\nfrom .user_group import UserGroup\nfrom .website import Website\nfrom .transition_domain import TransitionDomain\n\n__all__ = [\n \"Contact\",\n \"DomainApplication\",\n \"DomainInformation\",\n \"Domain\",\n \"DraftDomain\",\n \"DomainInvitation\",\n \"HostIP\",\n \"Host\",\n \"Nameserver\",\n \"UserDomainRole\",\n \"PublicContact\",\n \"User\",\n \"UserGroup\",\n \"Website\",\n \"TransitionDomain\",\n]\n\nauditlog.register(Contact)\nauditlog.register(DomainApplication)\nauditlog.register(Domain)\nauditlog.register(DraftDomain)\nauditlog.register(DomainInvitation)\nauditlog.register(HostIP)\nauditlog.register(Host)\nauditlog.register(Nameserver)\nauditlog.register(UserDomainRole)\nauditlog.register(PublicContact)\nauditlog.register(User, m2m_fields=[\"user_permissions\", \"groups\"])\nauditlog.register(UserGroup, m2m_fields=[\"permissions\"])\nauditlog.register(Website)\nauditlog.register(TransitionDomain)\n", "path": "src/registrar/models/__init__.py" } ]
[ { "content": "from auditlog.registry import auditlog # type: ignore\nfrom .contact import Contact\nfrom .domain_application import DomainApplication\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\nfrom .draft_domain import DraftDomain\nfrom .host_ip import HostIP\nfrom .host import Host\nfrom .domain_invitation import DomainInvitation\nfrom .nameserver import Nameserver\nfrom .user_domain_role import UserDomainRole\nfrom .public_contact import PublicContact\nfrom .user import User\nfrom .user_group import UserGroup\nfrom .website import Website\nfrom .transition_domain import TransitionDomain\n\n__all__ = [\n \"Contact\",\n \"DomainApplication\",\n \"DomainInformation\",\n \"Domain\",\n \"DraftDomain\",\n \"DomainInvitation\",\n \"HostIP\",\n \"Host\",\n \"Nameserver\",\n \"UserDomainRole\",\n \"PublicContact\",\n \"User\",\n \"UserGroup\",\n \"Website\",\n \"TransitionDomain\",\n]\n\nauditlog.register(Contact)\nauditlog.register(DomainApplication)\nauditlog.register(Domain)\nauditlog.register(DraftDomain)\nauditlog.register(DomainInvitation)\nauditlog.register(DomainInformation)\nauditlog.register(HostIP)\nauditlog.register(Host)\nauditlog.register(Nameserver)\nauditlog.register(UserDomainRole)\nauditlog.register(PublicContact)\nauditlog.register(User, m2m_fields=[\"user_permissions\", \"groups\"])\nauditlog.register(UserGroup, m2m_fields=[\"permissions\"])\nauditlog.register(Website)\nauditlog.register(TransitionDomain)\n", "path": "src/registrar/models/__init__.py" } ]
diff --git a/src/registrar/models/__init__.py b/src/registrar/models/__init__.py index 1d28c9e89..1203c7878 100644 --- a/src/registrar/models/__init__.py +++ b/src/registrar/models/__init__.py @@ -38,6 +38,7 @@ auditlog.register(Domain) auditlog.register(DraftDomain) auditlog.register(DomainInvitation) +auditlog.register(DomainInformation) auditlog.register(HostIP) auditlog.register(Host) auditlog.register(Nameserver)
graspologic-org__graspologic-207
GClust bug

<img width="558" alt="Screen Shot 2019-06-22 at 3 46 06 PM" src="https://user-images.githubusercontent.com/25714207/59968259-eb346c80-9504-11e9-984c-8c13dff93a37.png">

The `- 1` offset in the line shown above (`self.model_ = models[best_component - 1][best_covariance_idx]` in `gclust.py`) should be `- self.min_components` rather than `- 1`. This causes an indexing error when `min_components` does not equal 1.
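A toy, self-contained sketch of why the `- 1` offset breaks (the numbers are chosen purely for illustration): the fitted-models list has one slot per candidate component count starting at `min_components`, so mapping the chosen count back to a list index has to subtract `min_components`.

```python
min_components, max_components = 3, 6
candidate_counts = list(range(min_components, max_components + 1))  # [3, 4, 5, 6]
models = ["gmm_with_%d_components" % k for k in candidate_counts]   # 4 entries

best_component = 6   # component count picked by the BIC table

# Buggy lookup: assumes the list starts at 1 component.
#   models[best_component - 1] -> models[5] -> IndexError (len(models) == 4)

# Correct lookup: offset by the first candidate count.
assert models[best_component - min_components] == "gmm_with_6_components"
```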
[ { "content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.model_selection import ParameterGrid\n\nfrom .base import BaseCluster\n\n\nclass GaussianCluster(BaseCluster):\n r\"\"\"\n Gaussian Mixture Model (GMM)\n\n Representation of a Gaussian mixture model probability distribution. \n This class allows to estimate the parameters of a Gaussian mixture \n distribution. It computes all possible models from one component to \n max_components. The best model is given by the lowest BIC score.\n\n Parameters\n ----------\n min_components : int, default=2. \n The minimum number of mixture components to consider (unless\n max_components=None, in which case this is the maximum number of\n components to consider). If max_componens is not None, min_components\n must be less than or equal to max_components.\n\n max_components : int or None, default=None.\n The maximum number of mixture components to consider. Must be greater \n than or equal to min_components.\n\n covariance_type : {'full' (default), 'tied', 'diag', 'spherical'}, optional\n String or list/array describing the type of covariance parameters to use.\n If a string, it must be one of:\n \n - 'full'\n each component has its own general covariance matrix\n - 'tied'\n all components share the same general covariance matrix\n - 'diag'\n each component has its own diagonal covariance matrix\n - 'spherical'\n each component has its own single variance\n - 'all'\n considers all covariance structures in ['spherical', 'diag', 'tied', 'full']\n If a list/array, it must be a list/array of strings containing only\n 'spherical', 'tied', 'diag', and/or 'spherical'.\n \n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by ``np.random``.\n\n Attributes\n ----------\n n_components_ : int\n Optimal number of components based on BIC.\n covariance_type_ : str\n Optimal covariance type based on BIC.\n model_ : GaussianMixture object\n Fitted GaussianMixture object fitted with optimal numeber of components \n and optimal covariance structure.\n bic_ : pandas.DataFrame\n A pandas DataFrame of BIC values computed for all possible number of clusters\n given by range(min_components, max_components + 1) and all covariance\n structures given by covariance_type.\n ari_ : pandas.DataFrame\n Only computed when y is given. 
Pandas Dataframe containing ARI values computed\n for all possible number of clusters given by range(min_components,\n max_components) and all covariance structures given by covariance_type.\n \"\"\"\n\n def __init__(\n self,\n min_components=2,\n max_components=None,\n covariance_type=\"full\",\n random_state=None,\n ):\n if isinstance(min_components, int):\n if min_components <= 0:\n msg = \"min_components must be >= 1.\"\n raise ValueError(msg)\n else:\n msg = \"min_components must be an integer, not {}.\".format(\n type(min_components)\n )\n raise TypeError(msg)\n\n if isinstance(max_components, int):\n if max_components <= 0:\n msg = \"max_components must be >= 1 or None.\"\n raise ValueError(msg)\n elif min_components > max_components:\n msg = \"min_components must be less than or equal to max_components.\"\n raise ValueError(msg)\n elif max_components is not None:\n msg = \"max_components must be an integer or None, not {}.\".format(\n type(max_components)\n )\n raise TypeError(msg)\n\n if isinstance(covariance_type, (np.ndarray, list)):\n covariance_type = np.unique(covariance_type)\n elif isinstance(covariance_type, str):\n if covariance_type == \"all\":\n covariance_type = [\"spherical\", \"diag\", \"tied\", \"full\"]\n else:\n covariance_type = [covariance_type]\n else:\n msg = \"covariance_type must be a numpy array, a list, or \"\n msg += \"string, not {}\".format(type(covariance_type))\n raise TypeError(msg)\n\n for cov in covariance_type:\n if cov not in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n msg = (\n \"covariance structure must be one of \"\n + '[\"spherical\", \"diag\", \"tied\", \"full\"]'\n )\n msg += \" not {}\".format(cov)\n raise ValueError(msg)\n\n new_covariance_type = []\n for cov in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n if cov in covariance_type:\n new_covariance_type.append(cov)\n\n self.min_components = min_components\n self.max_components = max_components\n self.covariance_type = new_covariance_type\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"\n Fits gaussian mixure model to the data. \n Estimate model parameters with the EM algorithm.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n \n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. 
Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n # Deal with number of clusters\n if self.max_components is None:\n lower_ncomponents = 1\n upper_ncomponents = self.min_components\n else:\n lower_ncomponents = self.min_components\n upper_ncomponents = self.max_components\n\n n_mixture_components = upper_ncomponents - lower_ncomponents + 1\n\n if upper_ncomponents > X.shape[0]:\n if self.max_components is None:\n msg = \"if max_components is None then min_components must be >= \"\n msg += \"n_samples, but min_components = {}, n_samples = {}\".format(\n upper_ncomponents, X.shape[0]\n )\n else:\n msg = \"max_components must be >= n_samples, but max_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n elif lower_ncomponents > X.shape[0]:\n msg = \"min_components must be <= n_samples, but min_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n\n # Get parameters\n random_state = self.random_state\n\n param_grid = dict(\n covariance_type=self.covariance_type,\n n_components=range(lower_ncomponents, upper_ncomponents + 1),\n random_state=[random_state],\n )\n\n param_grid = list(ParameterGrid(param_grid))\n\n models = [[] for _ in range(n_mixture_components)]\n bics = [[] for _ in range(n_mixture_components)]\n aris = [[] for _ in range(n_mixture_components)]\n\n for i, params in enumerate(param_grid):\n model = GaussianMixture(**params)\n model.fit(X)\n models[i % n_mixture_components].append(model)\n bics[i % n_mixture_components].append(model.bic(X))\n if y is not None:\n predictions = model.predict(X)\n aris[i % n_mixture_components].append(\n adjusted_rand_score(y, predictions)\n )\n\n self.bic_ = pd.DataFrame(\n bics,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n\n if y is not None:\n self.ari_ = pd.DataFrame(\n aris,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n else:\n self.ari_ = None\n\n # Get the best cov type and its index within the dataframe\n best_covariance = self.bic_.min(axis=0).idxmin()\n best_covariance_idx = self.covariance_type.index(best_covariance)\n\n # Get the index best component for best_covariance\n best_component = self.bic_.idxmin()[best_covariance]\n\n self.n_components_ = best_component\n self.covariance_type_ = best_covariance\n self.model_ = models[best_component - 1][best_covariance_idx]\n\n return self\n", "path": "graspy/cluster/gclust.py" } ]
[ { "content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.model_selection import ParameterGrid\n\nfrom .base import BaseCluster\n\n\nclass GaussianCluster(BaseCluster):\n r\"\"\"\n Gaussian Mixture Model (GMM)\n\n Representation of a Gaussian mixture model probability distribution. \n This class allows to estimate the parameters of a Gaussian mixture \n distribution. It computes all possible models from one component to \n max_components. The best model is given by the lowest BIC score.\n\n Parameters\n ----------\n min_components : int, default=2. \n The minimum number of mixture components to consider (unless\n max_components=None, in which case this is the maximum number of\n components to consider). If max_componens is not None, min_components\n must be less than or equal to max_components.\n\n max_components : int or None, default=None.\n The maximum number of mixture components to consider. Must be greater \n than or equal to min_components.\n\n covariance_type : {'full' (default), 'tied', 'diag', 'spherical'}, optional\n String or list/array describing the type of covariance parameters to use.\n If a string, it must be one of:\n \n - 'full'\n each component has its own general covariance matrix\n - 'tied'\n all components share the same general covariance matrix\n - 'diag'\n each component has its own diagonal covariance matrix\n - 'spherical'\n each component has its own single variance\n - 'all'\n considers all covariance structures in ['spherical', 'diag', 'tied', 'full']\n If a list/array, it must be a list/array of strings containing only\n 'spherical', 'tied', 'diag', and/or 'spherical'.\n \n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by ``np.random``.\n\n Attributes\n ----------\n n_components_ : int\n Optimal number of components based on BIC.\n covariance_type_ : str\n Optimal covariance type based on BIC.\n model_ : GaussianMixture object\n Fitted GaussianMixture object fitted with optimal numeber of components \n and optimal covariance structure.\n bic_ : pandas.DataFrame\n A pandas DataFrame of BIC values computed for all possible number of clusters\n given by range(min_components, max_components + 1) and all covariance\n structures given by covariance_type.\n ari_ : pandas.DataFrame\n Only computed when y is given. 
Pandas Dataframe containing ARI values computed\n for all possible number of clusters given by range(min_components,\n max_components) and all covariance structures given by covariance_type.\n \"\"\"\n\n def __init__(\n self,\n min_components=2,\n max_components=None,\n covariance_type=\"full\",\n random_state=None,\n ):\n if isinstance(min_components, int):\n if min_components <= 0:\n msg = \"min_components must be >= 1.\"\n raise ValueError(msg)\n else:\n msg = \"min_components must be an integer, not {}.\".format(\n type(min_components)\n )\n raise TypeError(msg)\n\n if isinstance(max_components, int):\n if max_components <= 0:\n msg = \"max_components must be >= 1 or None.\"\n raise ValueError(msg)\n elif min_components > max_components:\n msg = \"min_components must be less than or equal to max_components.\"\n raise ValueError(msg)\n elif max_components is not None:\n msg = \"max_components must be an integer or None, not {}.\".format(\n type(max_components)\n )\n raise TypeError(msg)\n\n if isinstance(covariance_type, (np.ndarray, list)):\n covariance_type = np.unique(covariance_type)\n elif isinstance(covariance_type, str):\n if covariance_type == \"all\":\n covariance_type = [\"spherical\", \"diag\", \"tied\", \"full\"]\n else:\n covariance_type = [covariance_type]\n else:\n msg = \"covariance_type must be a numpy array, a list, or \"\n msg += \"string, not {}\".format(type(covariance_type))\n raise TypeError(msg)\n\n for cov in covariance_type:\n if cov not in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n msg = (\n \"covariance structure must be one of \"\n + '[\"spherical\", \"diag\", \"tied\", \"full\"]'\n )\n msg += \" not {}\".format(cov)\n raise ValueError(msg)\n\n new_covariance_type = []\n for cov in [\"spherical\", \"diag\", \"tied\", \"full\"]:\n if cov in covariance_type:\n new_covariance_type.append(cov)\n\n self.min_components = min_components\n self.max_components = max_components\n self.covariance_type = new_covariance_type\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"\n Fits gaussian mixure model to the data. \n Estimate model parameters with the EM algorithm.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n \n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. 
Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n # Deal with number of clusters\n if self.max_components is None:\n lower_ncomponents = 1\n upper_ncomponents = self.min_components\n else:\n lower_ncomponents = self.min_components\n upper_ncomponents = self.max_components\n\n n_mixture_components = upper_ncomponents - lower_ncomponents + 1\n\n if upper_ncomponents > X.shape[0]:\n if self.max_components is None:\n msg = \"if max_components is None then min_components must be >= \"\n msg += \"n_samples, but min_components = {}, n_samples = {}\".format(\n upper_ncomponents, X.shape[0]\n )\n else:\n msg = \"max_components must be >= n_samples, but max_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n elif lower_ncomponents > X.shape[0]:\n msg = \"min_components must be <= n_samples, but min_components = \"\n msg += \"{}, n_samples = {}\".format(upper_ncomponents, X.shape[0])\n raise ValueError(msg)\n\n # Get parameters\n random_state = self.random_state\n\n param_grid = dict(\n covariance_type=self.covariance_type,\n n_components=range(lower_ncomponents, upper_ncomponents + 1),\n random_state=[random_state],\n )\n\n param_grid = list(ParameterGrid(param_grid))\n\n models = [[] for _ in range(n_mixture_components)]\n bics = [[] for _ in range(n_mixture_components)]\n aris = [[] for _ in range(n_mixture_components)]\n\n for i, params in enumerate(param_grid):\n model = GaussianMixture(**params)\n model.fit(X)\n models[i % n_mixture_components].append(model)\n bics[i % n_mixture_components].append(model.bic(X))\n if y is not None:\n predictions = model.predict(X)\n aris[i % n_mixture_components].append(\n adjusted_rand_score(y, predictions)\n )\n\n self.bic_ = pd.DataFrame(\n bics,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n\n if y is not None:\n self.ari_ = pd.DataFrame(\n aris,\n index=np.arange(lower_ncomponents, upper_ncomponents + 1),\n columns=self.covariance_type,\n )\n else:\n self.ari_ = None\n\n # Get the best cov type and its index within the dataframe\n best_covariance = self.bic_.min(axis=0).idxmin()\n best_covariance_idx = self.covariance_type.index(best_covariance)\n\n # Get the index best component for best_covariance\n best_component = self.bic_.idxmin()[best_covariance]\n\n self.n_components_ = best_component\n self.covariance_type_ = best_covariance\n self.model_ = models[best_component - self.min_components][best_covariance_idx]\n\n return self\n", "path": "graspy/cluster/gclust.py" } ]
diff --git a/graspy/cluster/gclust.py b/graspy/cluster/gclust.py index 29b6bc07f..78d950399 100644 --- a/graspy/cluster/gclust.py +++ b/graspy/cluster/gclust.py @@ -240,6 +240,6 @@ def fit(self, X, y=None): self.n_components_ = best_component self.covariance_type_ = best_covariance - self.model_ = models[best_component - 1][best_covariance_idx] + self.model_ = models[best_component - self.min_components][best_covariance_idx] return self diff --git a/tests/cluster/test_gclust.py b/tests/cluster/test_gclust.py index 646d6953e..f30756dc0 100644 --- a/tests/cluster/test_gclust.py +++ b/tests/cluster/test_gclust.py @@ -98,7 +98,7 @@ def test_no_y(): assert_equal(gclust.n_components_, 2) -def test_outputs(): +def test_two_class(): """ Easily separable two gaussian problem. """ @@ -127,7 +127,29 @@ def test_outputs(): assert_allclose(gclust.ari_.loc[n_components], 1) -def test_bic(): +def test_five_class(): + """ + Easily separable five gaussian problem. + """ + np.random.seed(10) + + n = 100 + mus = [[i * 5, 0] for i in range(5)] + cov = np.eye(2) # balls + + num_sims = 10 + + for _ in range(num_sims): + X = np.vstack([np.random.multivariate_normal(mu, cov, n) for mu in mus]) + + gclust = GaussianCluster( + min_components=3, max_components=10, covariance_type="all" + ) + gclust.fit(X) + assert_equal(gclust.n_components_, 5) + + +def test_ase_three_blocks(): """ Expect 3 clusters from a 3 block model """
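For reference, a minimal reproduction modeled on the `test_five_class` case added in the diff; the import path and exact values are assumptions for illustration, not taken verbatim from the repository:

```python
# Hypothetical reproduction of the reported failure (import path assumed).
import numpy as np
from graspy.cluster import GaussianCluster  # assumed import path

np.random.seed(10)
n = 100
mus = [[i * 5, 0] for i in range(5)]  # five well-separated Gaussian means
cov = np.eye(2)
X = np.vstack([np.random.multivariate_normal(mu, cov, n) for mu in mus])

# With the buggy `models[best_component - 1]`, min_components=3 can select
# the wrong fitted model or raise IndexError; with the fix, the stored
# model_ matches the BIC-optimal n_components_.
gclust = GaussianCluster(min_components=3, max_components=10, covariance_type="all")
gclust.fit(X)
print(gclust.n_components_)  # expected: 5 after the fix
```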