in_source_id (stringlengths 13–58) | issue (stringlengths 3–241k) | before_files (listlengths 0–3) | after_files (listlengths 0–3) | pr_diff (stringlengths 109–107M, ⌀)
---|---|---|---|---|
pydantic__pydantic-2523 | `inherit_config` overwrites `json_encoders` from class creation arguments
### Checks
* [x] I added a descriptive title to this issue
* [x] I have searched (google, github) for similar issues and couldn't find anything
* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug
<!-- Sorry to sound so draconian, but every second saved replying to issues is time spent improving pydantic :-) -->
# Bug
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.1
pydantic compiled: False
install path: [...]/lib/python3.9/site-packages/pydantic
python version: 3.9.2 (default, Feb 21 2021, 06:38:26) [Clang 7.1.0 (tags/RELEASE_710/final)]
platform: macOS-10.14.6-x86_64-i386-64bit
optional deps. installed: ['typing-extensions']
```
<!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version -->
<!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to
confirm your bug hasn't already been reported. -->
<!-- Where possible please include a self-contained code snippet describing your bug: -->
```py
import pydantic
class Foo(pydantic.BaseModel):
    class Config:
        json_encoders = {int: str}

print('Foo json_encoders:', Foo.__config__.json_encoders)

class Bar(pydantic.BaseModel, json_encoders={int: str}):
    pass

print('Bar json_encoders:', Bar.__config__.json_encoders)
```
Output:
```
Foo json_encoders: {<class 'int'>: <class 'str'>}
Bar json_encoders: {}
```
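Both declarations are expected to produce the same encoder mapping. As a minimal check (the class names below are illustrative, not from the issue), the following fails on 1.8.1 and should pass once the merge is fixed:

```py
import pydantic


class WithConfigClass(pydantic.BaseModel):
    class Config:
        json_encoders = {int: str}


class WithClassKwargs(pydantic.BaseModel, json_encoders={int: str}):
    pass


# Both styles should yield the same mapping; on 1.8.1 the second assert fails
# because the kwarg-supplied encoders are overwritten with an empty dict.
assert WithConfigClass.__config__.json_encoders == {int: str}
assert WithClassKwargs.__config__.json_encoders == {int: str}
```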
Culprit:
https://github.com/samuelcolvin/pydantic/blob/62bb2ad4921016df51abf3922c3fe51113b08939/pydantic/main.py#L184-L187
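The `**config_kwargs` passed by the metaclass reach `inherit_config` as `namespace`, but the function then rebuilds `namespace['json_encoders']` from only the parent and self configs, discarding the kwarg-supplied mapping. The `after_files` snapshot below resolves this by merging the namespace value last; excerpted here from `pydantic/main.py` (after state) for readability:

```py
def inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':
    if not self_config:
        base_classes: Tuple['ConfigType', ...] = (parent_config,)
    elif self_config == parent_config:
        base_classes = (self_config,)
    else:
        base_classes = self_config, parent_config

    # Merge encoders from the parent config, the model's own Config class, and
    # finally any `json_encoders` supplied via class keyword arguments, so the
    # kwarg-supplied mapping is no longer dropped.
    namespace['json_encoders'] = {
        **getattr(parent_config, 'json_encoders', {}),
        **getattr(self_config, 'json_encoders', {}),
        **namespace.get('json_encoders', {}),
    }

    return type('Config', base_classes, namespace)
```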
| [
{
"content": "import json\nimport sys\nimport warnings\nfrom abc import ABCMeta\nfrom copy import deepcopy\nfrom enum import Enum\nfrom functools import partial\nfrom pathlib import Path\nfrom types import FunctionType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n no_type_check,\n overload,\n)\n\nfrom .class_validators import ValidatorGroup, extract_root_validators, extract_validators, inherit_validators\nfrom .error_wrappers import ErrorWrapper, ValidationError\nfrom .errors import ConfigError, DictError, ExtraError, MissingError\nfrom .fields import MAPPING_LIKE_SHAPES, ModelField, ModelPrivateAttr, PrivateAttr, Undefined\nfrom .json import custom_pydantic_encoder, pydantic_encoder\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .schema import default_ref_template, model_schema\nfrom .types import PyObject, StrBytes\nfrom .typing import (\n AnyCallable,\n get_args,\n get_origin,\n is_classvar,\n is_namedtuple,\n resolve_annotations,\n update_field_forward_refs,\n)\nfrom .utils import (\n ROOT_KEY,\n ClassAttribute,\n GetterDict,\n Representation,\n ValueItems,\n generate_model_signature,\n is_valid_field,\n is_valid_private_name,\n lenient_issubclass,\n sequence_like,\n smart_deepcopy,\n unique_list,\n validate_field_name,\n)\n\nif TYPE_CHECKING:\n from inspect import Signature\n\n import typing_extensions\n\n from .class_validators import ValidatorListDict\n from .types import ModelOrDc\n from .typing import ( # noqa: F401\n AbstractSetIntStr,\n CallableGenerator,\n DictAny,\n DictStrAny,\n MappingIntStrAny,\n ReprArgs,\n SetStr,\n TupleGenerator,\n )\n\n ConfigType = Type['BaseConfig']\n Model = TypeVar('Model', bound='BaseModel')\n\n class SchemaExtraCallable(typing_extensions.Protocol):\n @overload\n def __call__(self, schema: Dict[str, Any]) -> None:\n pass\n\n @overload # noqa: F811\n def __call__(self, schema: Dict[str, Any], model_class: Type['Model']) -> None: # noqa: F811\n pass\n\n\nelse:\n SchemaExtraCallable = Callable[..., None]\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n__all__ = 'BaseConfig', 'BaseModel', 'Extra', 'compiled', 'create_model', 'validate_model'\n\n\nclass Extra(str, Enum):\n allow = 'allow'\n ignore = 'ignore'\n forbid = 'forbid'\n\n\nclass BaseConfig:\n title = None\n anystr_lower = False\n anystr_strip_whitespace = False\n min_anystr_length = None\n max_anystr_length = None\n validate_all = False\n extra = Extra.ignore\n allow_mutation = True\n frozen = False\n allow_population_by_field_name = False\n use_enum_values = False\n fields: Dict[str, Union[str, Dict[str, str]]] = {}\n validate_assignment = False\n error_msg_templates: Dict[str, str] = {}\n arbitrary_types_allowed = False\n orm_mode: bool = False\n getter_dict: Type[GetterDict] = GetterDict\n alias_generator: Optional[Callable[[str], str]] = None\n keep_untouched: Tuple[type, ...] 
= ()\n schema_extra: Union[Dict[str, Any], 'SchemaExtraCallable'] = {}\n json_loads: Callable[[str], Any] = json.loads\n json_dumps: Callable[..., str] = json.dumps\n json_encoders: Dict[Type[Any], AnyCallable] = {}\n underscore_attrs_are_private: bool = False\n\n # Whether or not inherited models as fields should be reconstructed as base model\n copy_on_model_validation: bool = True\n\n @classmethod\n def get_field_info(cls, name: str) -> Dict[str, Any]:\n \"\"\"\n Get properties of FieldInfo from the `fields` property of the config class.\n \"\"\"\n\n fields_value = cls.fields.get(name)\n\n if isinstance(fields_value, str):\n field_info: Dict[str, Any] = {'alias': fields_value}\n elif isinstance(fields_value, dict):\n field_info = fields_value\n else:\n field_info = {}\n\n if 'alias' in field_info:\n field_info.setdefault('alias_priority', 2)\n\n if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:\n alias = cls.alias_generator(name)\n if not isinstance(alias, str):\n raise TypeError(f'Config.alias_generator must return str, not {alias.__class__}')\n field_info.update(alias=alias, alias_priority=1)\n return field_info\n\n @classmethod\n def prepare_field(cls, field: 'ModelField') -> None:\n \"\"\"\n Optional hook to check or modify fields during model creation.\n \"\"\"\n pass\n\n\ndef inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':\n if not self_config:\n base_classes: Tuple['ConfigType', ...] = (parent_config,)\n elif self_config == parent_config:\n base_classes = (self_config,)\n else:\n base_classes = self_config, parent_config\n\n namespace['json_encoders'] = {\n **getattr(parent_config, 'json_encoders', {}),\n **getattr(self_config, 'json_encoders', {}),\n }\n\n return type('Config', base_classes, namespace)\n\n\nEXTRA_LINK = 'https://pydantic-docs.helpmanual.io/usage/model_config/'\n\n\ndef prepare_config(config: Type[BaseConfig], cls_name: str) -> None:\n if not isinstance(config.extra, Extra):\n try:\n config.extra = Extra(config.extra)\n except ValueError:\n raise ValueError(f'\"{cls_name}\": {config.extra} is not a valid value for \"extra\"')\n\n\ndef validate_custom_root_type(fields: Dict[str, ModelField]) -> None:\n if len(fields) > 1:\n raise ValueError(f'{ROOT_KEY} cannot be mixed with other fields')\n\n\ndef generate_hash_function(frozen: bool) -> Optional[Callable[[Any], int]]:\n def hash_function(self_: Any) -> int:\n return hash(self_.__class__) + hash(tuple(self_.__dict__.values()))\n\n return hash_function if frozen else None\n\n\n# If a field is of type `Callable`, its default value should be a function and cannot to ignored.\nANNOTATED_FIELD_UNTOUCHED_TYPES: Tuple[Any, ...] = (property, type, classmethod, staticmethod)\n# When creating a `BaseModel` instance, we bypass all the methods, properties... added to the model\nUNTOUCHED_TYPES: Tuple[Any, ...] = (FunctionType,) + ANNOTATED_FIELD_UNTOUCHED_TYPES\n# Note `ModelMetaclass` refers to `BaseModel`, but is also used to *create* `BaseModel`, so we need to add this extra\n# (somewhat hacky) boolean to keep track of whether we've created the `BaseModel` class yet, and therefore whether it's\n# safe to refer to it. 
If it *hasn't* been created, we assume that the `__new__` call we're in the middle of is for\n# the `BaseModel` class, since that's defined immediately after the metaclass.\n_is_base_model_class_defined = False\n\n\nclass ModelMetaclass(ABCMeta):\n @no_type_check # noqa C901\n def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901\n fields: Dict[str, ModelField] = {}\n config = BaseConfig\n validators: 'ValidatorListDict' = {}\n\n pre_root_validators, post_root_validators = [], []\n private_attributes: Dict[str, ModelPrivateAttr] = {}\n slots: SetStr = namespace.get('__slots__', ())\n slots = {slots} if isinstance(slots, str) else set(slots)\n class_vars: SetStr = set()\n hash_func: Optional[Callable[[Any], int]] = None\n\n for base in reversed(bases):\n if _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel:\n fields.update(smart_deepcopy(base.__fields__))\n config = inherit_config(base.__config__, config)\n validators = inherit_validators(base.__validators__, validators)\n pre_root_validators += base.__pre_root_validators__\n post_root_validators += base.__post_root_validators__\n private_attributes.update(base.__private_attributes__)\n class_vars.update(base.__class_vars__)\n hash_func = base.__hash__\n\n allowed_config_kwargs: SetStr = {\n key\n for key in dir(config)\n if not (key.startswith('__') and key.endswith('__')) # skip dunder methods and attributes\n }\n config_kwargs = {key: kwargs.pop(key) for key in kwargs.keys() & allowed_config_kwargs}\n config_from_namespace = namespace.get('Config')\n if config_kwargs and config_from_namespace:\n raise TypeError('Specifying config in two places is ambiguous, use either Config attribute or class kwargs')\n config = inherit_config(config_from_namespace, config, **config_kwargs)\n\n validators = inherit_validators(extract_validators(namespace), validators)\n vg = ValidatorGroup(validators)\n\n for f in fields.values():\n f.set_config(config)\n extra_validators = vg.get_validators(f.name)\n if extra_validators:\n f.class_validators.update(extra_validators)\n # re-run prepare to add extra validators\n f.populate_validators()\n\n prepare_config(config, name)\n\n untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES\n\n def is_untouched(v: Any) -> bool:\n return isinstance(v, untouched_types) or v.__class__.__name__ == 'cython_function_or_method'\n\n if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):\n annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))\n # annotation only fields need to come first in fields\n for ann_name, ann_type in annotations.items():\n if is_classvar(ann_type):\n class_vars.add(ann_name)\n elif is_valid_field(ann_name):\n validate_field_name(bases, ann_name)\n value = namespace.get(ann_name, Undefined)\n allowed_types = get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,)\n if (\n is_untouched(value)\n and ann_type != PyObject\n and not any(\n lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types\n )\n ):\n continue\n fields[ann_name] = ModelField.infer(\n name=ann_name,\n value=value,\n annotation=ann_type,\n class_validators=vg.get_validators(ann_name),\n config=config,\n )\n elif ann_name not in namespace and config.underscore_attrs_are_private:\n private_attributes[ann_name] = PrivateAttr()\n\n untouched_types = UNTOUCHED_TYPES + config.keep_untouched\n for var_name, value in namespace.items():\n can_be_changed = var_name not in 
class_vars and not is_untouched(value)\n if isinstance(value, ModelPrivateAttr):\n if not is_valid_private_name(var_name):\n raise NameError(\n f'Private attributes \"{var_name}\" must not be a valid field name; '\n f'Use sunder or dunder names, e. g. \"_{var_name}\" or \"__{var_name}__\"'\n )\n private_attributes[var_name] = value\n elif config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed:\n private_attributes[var_name] = PrivateAttr(default=value)\n elif is_valid_field(var_name) and var_name not in annotations and can_be_changed:\n validate_field_name(bases, var_name)\n inferred = ModelField.infer(\n name=var_name,\n value=value,\n annotation=annotations.get(var_name, Undefined),\n class_validators=vg.get_validators(var_name),\n config=config,\n )\n if var_name in fields and inferred.type_ != fields[var_name].type_:\n raise TypeError(\n f'The type of {name}.{var_name} differs from the new default value; '\n f'if you wish to change the type of this field, please use a type annotation'\n )\n fields[var_name] = inferred\n\n _custom_root_type = ROOT_KEY in fields\n if _custom_root_type:\n validate_custom_root_type(fields)\n vg.check_for_unused()\n if config.json_encoders:\n json_encoder = partial(custom_pydantic_encoder, config.json_encoders)\n else:\n json_encoder = pydantic_encoder\n pre_rv_new, post_rv_new = extract_root_validators(namespace)\n\n if hash_func is None:\n hash_func = generate_hash_function(config.frozen)\n\n exclude_from_namespace = fields | private_attributes.keys() | {'__slots__'}\n new_namespace = {\n '__config__': config,\n '__fields__': fields,\n '__exclude_fields__': {\n name: field.field_info.exclude for name, field in fields.items() if field.field_info.exclude is not None\n }\n or None,\n '__include_fields__': {\n name: field.field_info.include for name, field in fields.items() if field.field_info.include is not None\n }\n or None,\n '__validators__': vg.validators,\n '__pre_root_validators__': unique_list(pre_root_validators + pre_rv_new),\n '__post_root_validators__': unique_list(post_root_validators + post_rv_new),\n '__schema_cache__': {},\n '__json_encoder__': staticmethod(json_encoder),\n '__custom_root_type__': _custom_root_type,\n '__private_attributes__': private_attributes,\n '__slots__': slots | private_attributes.keys(),\n '__hash__': hash_func,\n '__class_vars__': class_vars,\n **{n: v for n, v in namespace.items() if n not in exclude_from_namespace},\n }\n\n cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)\n # set __signature__ attr only for model class, but not for its instances\n cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config))\n return cls\n\n\nobject_setattr = object.__setattr__\n\n\nclass BaseModel(Representation, metaclass=ModelMetaclass):\n if TYPE_CHECKING:\n # populated by the metaclass, defined here to help IDEs only\n __fields__: Dict[str, ModelField] = {}\n __include_fields__: Optional[Mapping[str, Any]] = None\n __exclude_fields__: Optional[Mapping[str, Any]] = None\n __validators__: Dict[str, AnyCallable] = {}\n __pre_root_validators__: List[AnyCallable]\n __post_root_validators__: List[Tuple[bool, AnyCallable]]\n __config__: Type[BaseConfig] = BaseConfig\n __root__: Any = None\n __json_encoder__: Callable[[Any], Any] = lambda x: x\n __schema_cache__: 'DictAny' = {}\n __custom_root_type__: bool = False\n __signature__: 'Signature'\n __private_attributes__: Dict[str, Any]\n __class_vars__: SetStr\n __fields_set__: SetStr = 
set()\n\n Config = BaseConfig\n __slots__ = ('__dict__', '__fields_set__')\n __doc__ = '' # Null out the Representation docstring\n\n def __init__(__pydantic_self__, **data: Any) -> None:\n \"\"\"\n Create a new model by parsing and validating input data from keyword arguments.\n\n Raises ValidationError if the input data cannot be parsed to form a valid model.\n \"\"\"\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)\n if validation_error:\n raise validation_error\n try:\n object_setattr(__pydantic_self__, '__dict__', values)\n except TypeError as e:\n raise TypeError(\n 'Model values must be a dict; you may not have returned a dictionary from a root validator'\n ) from e\n object_setattr(__pydantic_self__, '__fields_set__', fields_set)\n __pydantic_self__._init_private_attributes()\n\n @no_type_check\n def __setattr__(self, name, value): # noqa: C901 (ignore complexity)\n if name in self.__private_attributes__:\n return object_setattr(self, name, value)\n\n if self.__config__.extra is not Extra.allow and name not in self.__fields__:\n raise ValueError(f'\"{self.__class__.__name__}\" object has no field \"{name}\"')\n elif not self.__config__.allow_mutation or self.__config__.frozen:\n raise TypeError(f'\"{self.__class__.__name__}\" is immutable and does not support item assignment')\n elif self.__config__.validate_assignment:\n new_values = {**self.__dict__, name: value}\n\n for validator in self.__pre_root_validators__:\n try:\n new_values = validator(self.__class__, new_values)\n except (ValueError, TypeError, AssertionError) as exc:\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], self.__class__)\n\n known_field = self.__fields__.get(name, None)\n if known_field:\n # We want to\n # - make sure validators are called without the current value for this field inside `values`\n # - keep other values (e.g. submodels) untouched (using `BaseModel.dict()` will change them into dicts)\n # - keep the order of the fields\n if not known_field.field_info.allow_mutation:\n raise TypeError(f'\"{known_field.name}\" has allow_mutation set to False and cannot be assigned')\n dict_without_original_value = {k: v for k, v in self.__dict__.items() if k != name}\n value, error_ = known_field.validate(value, dict_without_original_value, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], self.__class__)\n else:\n new_values[name] = value\n\n errors = []\n for skip_on_failure, validator in self.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n new_values = validator(self.__class__, new_values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n if errors:\n raise ValidationError(errors, self.__class__)\n\n # update the whole __dict__ as other values than just `value`\n # may be changed (e.g. 
with `root_validator`)\n object_setattr(self, '__dict__', new_values)\n else:\n self.__dict__[name] = value\n\n self.__fields_set__.add(name)\n\n def __getstate__(self) -> 'DictAny':\n private_attrs = ((k, getattr(self, k, Undefined)) for k in self.__private_attributes__)\n return {\n '__dict__': self.__dict__,\n '__fields_set__': self.__fields_set__,\n '__private_attribute_values__': {k: v for k, v in private_attrs if v is not Undefined},\n }\n\n def __setstate__(self, state: 'DictAny') -> None:\n object_setattr(self, '__dict__', state['__dict__'])\n object_setattr(self, '__fields_set__', state['__fields_set__'])\n for name, value in state.get('__private_attribute_values__', {}).items():\n object_setattr(self, name, value)\n\n def _init_private_attributes(self) -> None:\n for name, private_attr in self.__private_attributes__.items():\n default = private_attr.get_default()\n if default is not Undefined:\n object_setattr(self, name, default)\n\n def dict(\n self,\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'DictStrAny':\n \"\"\"\n Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.\n\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.dict(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n\n return dict(\n self._iter(\n to_dict=True,\n by_alias=by_alias,\n include=include,\n exclude=exclude,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n )\n\n def json(\n self,\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n encoder: Optional[Callable[[Any], Any]] = None,\n **dumps_kwargs: Any,\n ) -> str:\n \"\"\"\n Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.\n\n `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.json(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)\n data = self.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if self.__custom_root_type__:\n data = data[ROOT_KEY]\n return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)\n\n @classmethod\n def _enforce_dict_if_root(cls, obj: Any) -> Any:\n if cls.__custom_root_type__ and (\n not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY})\n or cls.__fields__[ROOT_KEY].shape in MAPPING_LIKE_SHAPES\n ):\n return {ROOT_KEY: obj}\n else:\n return obj\n\n @classmethod\n def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':\n obj = cls._enforce_dict_if_root(obj)\n if not isinstance(obj, dict):\n try:\n obj = dict(obj)\n except (TypeError, ValueError) as e:\n exc 
= TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}')\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e\n return cls(**obj)\n\n @classmethod\n def parse_raw(\n cls: Type['Model'],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n try:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n except (ValueError, TypeError, UnicodeDecodeError) as e:\n raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)\n return cls.parse_obj(obj)\n\n @classmethod\n def parse_file(\n cls: Type['Model'],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n return cls.parse_obj(obj)\n\n @classmethod\n def from_orm(cls: Type['Model'], obj: Any) -> 'Model':\n if not cls.__config__.orm_mode:\n raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')\n obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj)\n m = cls.__new__(cls)\n values, fields_set, validation_error = validate_model(cls, obj)\n if validation_error:\n raise validation_error\n object_setattr(m, '__dict__', values)\n object_setattr(m, '__fields_set__', fields_set)\n m._init_private_attributes()\n return m\n\n @classmethod\n def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':\n \"\"\"\n Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.\n Default values are respected, but no other validation is performed.\n Behaves as if `Config.extra = 'allow'` was set since it adds all passed values\n \"\"\"\n m = cls.__new__(cls)\n fields_values: Dict[str, Any] = {}\n for name, field in cls.__fields__.items():\n if name in values:\n fields_values[name] = values[name]\n elif not field.required:\n fields_values[name] = field.get_default()\n fields_values.update(values)\n object_setattr(m, '__dict__', fields_values)\n if _fields_set is None:\n _fields_set = set(values.keys())\n object_setattr(m, '__fields_set__', _fields_set)\n m._init_private_attributes()\n return m\n\n def copy(\n self: 'Model',\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n update: 'DictStrAny' = None,\n deep: bool = False,\n ) -> 'Model':\n \"\"\"\n Duplicate a model, optionally choose which fields to include, exclude and change.\n\n :param include: fields to include in new model\n :param exclude: fields to exclude from new model, as with values this takes precedence over include\n :param update: values to change/add in the new model. 
Note: the data is not validated before creating\n the new model: you should trust this data\n :param deep: set to `True` to make a deep copy of the model\n :return: new model instance\n \"\"\"\n\n v = dict(\n self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False),\n **(update or {}),\n )\n\n if deep:\n # chances of having empty dict here are quite low for using smart_deepcopy\n v = deepcopy(v)\n\n cls = self.__class__\n m = cls.__new__(cls)\n object_setattr(m, '__dict__', v)\n # new `__fields_set__` can have unset optional fields with a set value in `update` kwarg\n if update:\n fields_set = self.__fields_set__ | update.keys()\n else:\n fields_set = set(self.__fields_set__)\n object_setattr(m, '__fields_set__', fields_set)\n for name in self.__private_attributes__:\n value = getattr(self, name, Undefined)\n if value is not Undefined:\n if deep:\n value = deepcopy(value)\n object_setattr(m, name, value)\n\n return m\n\n @classmethod\n def schema(cls, by_alias: bool = True, ref_template: str = default_ref_template) -> 'DictStrAny':\n cached = cls.__schema_cache__.get((by_alias, ref_template))\n if cached is not None:\n return cached\n s = model_schema(cls, by_alias=by_alias, ref_template=ref_template)\n cls.__schema_cache__[(by_alias, ref_template)] = s\n return s\n\n @classmethod\n def schema_json(\n cls, *, by_alias: bool = True, ref_template: str = default_ref_template, **dumps_kwargs: Any\n ) -> str:\n from .json import pydantic_encoder\n\n return cls.__config__.json_dumps(\n cls.schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs\n )\n\n @classmethod\n def __get_validators__(cls) -> 'CallableGenerator':\n yield cls.validate\n\n @classmethod\n def validate(cls: Type['Model'], value: Any) -> 'Model':\n if isinstance(value, cls):\n return value.copy() if cls.__config__.copy_on_model_validation else value\n\n value = cls._enforce_dict_if_root(value)\n if isinstance(value, dict):\n return cls(**value)\n elif cls.__config__.orm_mode:\n return cls.from_orm(value)\n else:\n try:\n value_as_dict = dict(value)\n except (TypeError, ValueError) as e:\n raise DictError() from e\n return cls(**value_as_dict)\n\n @classmethod\n def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:\n return cls.__config__.getter_dict(obj)\n\n @classmethod\n @no_type_check\n def _get_value(\n cls,\n v: Any,\n to_dict: bool,\n by_alias: bool,\n include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude_unset: bool,\n exclude_defaults: bool,\n exclude_none: bool,\n ) -> Any:\n\n if isinstance(v, BaseModel):\n if to_dict:\n v_dict = v.dict(\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=include,\n exclude=exclude,\n exclude_none=exclude_none,\n )\n if ROOT_KEY in v_dict:\n return v_dict[ROOT_KEY]\n return v_dict\n else:\n return v.copy(include=include, exclude=exclude)\n\n value_exclude = ValueItems(v, exclude) if exclude else None\n value_include = ValueItems(v, include) if include else None\n\n if isinstance(v, dict):\n return {\n k_: cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(k_),\n exclude=value_exclude and value_exclude.for_element(k_),\n exclude_none=exclude_none,\n )\n for k_, v_ in v.items()\n if (not value_exclude or not 
value_exclude.is_excluded(k_))\n and (not value_include or value_include.is_included(k_))\n }\n\n elif sequence_like(v):\n seq_args = (\n cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(i),\n exclude=value_exclude and value_exclude.for_element(i),\n exclude_none=exclude_none,\n )\n for i, v_ in enumerate(v)\n if (not value_exclude or not value_exclude.is_excluded(i))\n and (not value_include or value_include.is_included(i))\n )\n\n return v.__class__(*seq_args) if is_namedtuple(v.__class__) else v.__class__(seq_args)\n\n elif isinstance(v, Enum) and getattr(cls.Config, 'use_enum_values', False):\n return v.value\n\n else:\n return v\n\n @classmethod\n def update_forward_refs(cls, **localns: Any) -> None:\n \"\"\"\n Try to update ForwardRefs on fields based on this Model, globalns and localns.\n \"\"\"\n globalns = sys.modules[cls.__module__].__dict__.copy()\n globalns.setdefault(cls.__name__, cls)\n for f in cls.__fields__.values():\n update_field_forward_refs(f, globalns=globalns, localns=localns)\n\n def __iter__(self) -> 'TupleGenerator':\n \"\"\"\n so `dict(model)` works\n \"\"\"\n yield from self.__dict__.items()\n\n def _iter(\n self,\n to_dict: bool = False,\n by_alias: bool = False,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'TupleGenerator':\n\n # Merge field set excludes with explicit exclude parameter with explicit overriding field set options.\n # The extra \"is not None\" guards are not logically necessary but optimizes performance for the simple case.\n if exclude is not None or self.__exclude_fields__ is not None:\n exclude = ValueItems.merge(self.__exclude_fields__, exclude)\n\n if include is not None or self.__include_fields__ is not None:\n include = ValueItems.merge(self.__include_fields__, include, intersect=True)\n\n allowed_keys = self._calculate_keys(\n include=include, exclude=exclude, exclude_unset=exclude_unset # type: ignore\n )\n if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none):\n # huge boost for plain _iter()\n yield from self.__dict__.items()\n return\n\n value_exclude = ValueItems(self, exclude) if exclude is not None else None\n value_include = ValueItems(self, include) if include is not None else None\n\n for field_key, v in self.__dict__.items():\n if (allowed_keys is not None and field_key not in allowed_keys) or (exclude_none and v is None):\n continue\n\n if exclude_defaults:\n model_field = self.__fields__.get(field_key)\n if not getattr(model_field, 'required', True) and getattr(model_field, 'default', _missing) == v:\n continue\n\n if by_alias and field_key in self.__fields__:\n dict_key = self.__fields__[field_key].alias\n else:\n dict_key = field_key\n\n if to_dict or value_include or value_exclude:\n v = self._get_value(\n v,\n to_dict=to_dict,\n by_alias=by_alias,\n include=value_include and value_include.for_element(field_key),\n exclude=value_exclude and value_exclude.for_element(field_key),\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n yield dict_key, v\n\n def _calculate_keys(\n self,\n include: Optional['MappingIntStrAny'],\n exclude: Optional['MappingIntStrAny'],\n exclude_unset: bool,\n update: 
Optional['DictStrAny'] = None,\n ) -> Optional[AbstractSet[str]]:\n if include is None and exclude is None and exclude_unset is False:\n return None\n\n keys: AbstractSet[str]\n if exclude_unset:\n keys = self.__fields_set__.copy()\n else:\n keys = self.__dict__.keys()\n\n if include is not None:\n keys &= include.keys()\n\n if update:\n keys -= update.keys()\n\n if exclude:\n keys -= {k for k, v in exclude.items() if ValueItems.is_true(v)}\n\n return keys\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, BaseModel):\n return self.dict() == other.dict()\n else:\n return self.dict() == other\n\n def __repr_args__(self) -> 'ReprArgs':\n return self.__dict__.items() # type: ignore\n\n\n_is_base_model_class_defined = True\n\n\ndef create_model(\n __model_name: str,\n *,\n __config__: Type[BaseConfig] = None,\n __base__: Type['Model'] = None,\n __module__: str = __name__,\n __validators__: Dict[str, classmethod] = None,\n **field_definitions: Any,\n) -> Type['Model']:\n \"\"\"\n Dynamically create a model.\n :param __model_name: name of the created model\n :param __config__: config class to use for the new model\n :param __base__: base class for the new model to inherit from\n :param __module__: module of the created model\n :param __validators__: a dict of method names and @validator class methods\n :param field_definitions: fields of the model (or extra fields if a base is supplied)\n in the format `<name>=(<type>, <default default>)` or `<name>=<default value>, e.g.\n `foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format\n `<name>=<FieldInfo>`, e.g. `foo=Field(default_factory=datetime.utcnow, alias='bar')`\n \"\"\"\n\n if __base__ is not None:\n if __config__ is not None:\n raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')\n else:\n __base__ = cast(Type['Model'], BaseModel)\n\n fields = {}\n annotations = {}\n\n for f_name, f_def in field_definitions.items():\n if not is_valid_field(f_name):\n warnings.warn(f'fields may not start with an underscore, ignoring \"{f_name}\"', RuntimeWarning)\n if isinstance(f_def, tuple):\n try:\n f_annotation, f_value = f_def\n except ValueError as e:\n raise ConfigError(\n 'field definitions should either be a tuple of (<type>, <default>) or just a '\n 'default value, unfortunately this means tuples as '\n 'default values are not allowed'\n ) from e\n else:\n f_annotation, f_value = None, f_def\n\n if f_annotation:\n annotations[f_name] = f_annotation\n fields[f_name] = f_value\n\n namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}\n if __validators__:\n namespace.update(__validators__)\n namespace.update(fields)\n if __config__:\n namespace['Config'] = inherit_config(__config__, BaseConfig)\n\n return type(__model_name, (__base__,), namespace)\n\n\n_missing = object()\n\n\ndef validate_model( # noqa: C901 (ignore complexity)\n model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None\n) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:\n \"\"\"\n validate data against a model.\n \"\"\"\n values = {}\n errors = []\n # input_data names, possibly alias\n names_used = set()\n # field names, never aliases\n fields_set = set()\n config = model.__config__\n check_extra = config.extra is not Extra.ignore\n cls_ = cls or model\n\n for validator in model.__pre_root_validators__:\n try:\n input_data = validator(cls_, input_data)\n except (ValueError, TypeError, AssertionError) as exc:\n return {}, set(), 
ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)\n\n for name, field in model.__fields__.items():\n value = input_data.get(field.alias, _missing)\n using_name = False\n if value is _missing and config.allow_population_by_field_name and field.alt_alias:\n value = input_data.get(field.name, _missing)\n using_name = True\n\n if value is _missing:\n if field.required:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n continue\n\n value = field.get_default()\n\n if not config.validate_all and not field.validate_always:\n values[name] = value\n continue\n else:\n fields_set.add(name)\n if check_extra:\n names_used.add(field.name if using_name else field.alias)\n\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[name] = v_\n\n if check_extra:\n if isinstance(input_data, GetterDict):\n extra = input_data.extra_keys() - names_used\n else:\n extra = input_data.keys() - names_used\n if extra:\n fields_set |= extra\n if config.extra is Extra.allow:\n for f in extra:\n values[f] = input_data[f]\n else:\n for f in sorted(extra):\n errors.append(ErrorWrapper(ExtraError(), loc=f))\n\n for skip_on_failure, validator in model.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n values = validator(cls_, values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n\n if errors:\n return values, fields_set, ValidationError(errors, cls_)\n else:\n return values, fields_set, None\n",
"path": "pydantic/main.py"
}
] | [
{
"content": "import json\nimport sys\nimport warnings\nfrom abc import ABCMeta\nfrom copy import deepcopy\nfrom enum import Enum\nfrom functools import partial\nfrom pathlib import Path\nfrom types import FunctionType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n no_type_check,\n overload,\n)\n\nfrom .class_validators import ValidatorGroup, extract_root_validators, extract_validators, inherit_validators\nfrom .error_wrappers import ErrorWrapper, ValidationError\nfrom .errors import ConfigError, DictError, ExtraError, MissingError\nfrom .fields import MAPPING_LIKE_SHAPES, ModelField, ModelPrivateAttr, PrivateAttr, Undefined\nfrom .json import custom_pydantic_encoder, pydantic_encoder\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .schema import default_ref_template, model_schema\nfrom .types import PyObject, StrBytes\nfrom .typing import (\n AnyCallable,\n get_args,\n get_origin,\n is_classvar,\n is_namedtuple,\n resolve_annotations,\n update_field_forward_refs,\n)\nfrom .utils import (\n ROOT_KEY,\n ClassAttribute,\n GetterDict,\n Representation,\n ValueItems,\n generate_model_signature,\n is_valid_field,\n is_valid_private_name,\n lenient_issubclass,\n sequence_like,\n smart_deepcopy,\n unique_list,\n validate_field_name,\n)\n\nif TYPE_CHECKING:\n from inspect import Signature\n\n import typing_extensions\n\n from .class_validators import ValidatorListDict\n from .types import ModelOrDc\n from .typing import ( # noqa: F401\n AbstractSetIntStr,\n CallableGenerator,\n DictAny,\n DictStrAny,\n MappingIntStrAny,\n ReprArgs,\n SetStr,\n TupleGenerator,\n )\n\n ConfigType = Type['BaseConfig']\n Model = TypeVar('Model', bound='BaseModel')\n\n class SchemaExtraCallable(typing_extensions.Protocol):\n @overload\n def __call__(self, schema: Dict[str, Any]) -> None:\n pass\n\n @overload # noqa: F811\n def __call__(self, schema: Dict[str, Any], model_class: Type['Model']) -> None: # noqa: F811\n pass\n\n\nelse:\n SchemaExtraCallable = Callable[..., None]\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n__all__ = 'BaseConfig', 'BaseModel', 'Extra', 'compiled', 'create_model', 'validate_model'\n\n\nclass Extra(str, Enum):\n allow = 'allow'\n ignore = 'ignore'\n forbid = 'forbid'\n\n\nclass BaseConfig:\n title = None\n anystr_lower = False\n anystr_strip_whitespace = False\n min_anystr_length = None\n max_anystr_length = None\n validate_all = False\n extra = Extra.ignore\n allow_mutation = True\n frozen = False\n allow_population_by_field_name = False\n use_enum_values = False\n fields: Dict[str, Union[str, Dict[str, str]]] = {}\n validate_assignment = False\n error_msg_templates: Dict[str, str] = {}\n arbitrary_types_allowed = False\n orm_mode: bool = False\n getter_dict: Type[GetterDict] = GetterDict\n alias_generator: Optional[Callable[[str], str]] = None\n keep_untouched: Tuple[type, ...] 
= ()\n schema_extra: Union[Dict[str, Any], 'SchemaExtraCallable'] = {}\n json_loads: Callable[[str], Any] = json.loads\n json_dumps: Callable[..., str] = json.dumps\n json_encoders: Dict[Type[Any], AnyCallable] = {}\n underscore_attrs_are_private: bool = False\n\n # Whether or not inherited models as fields should be reconstructed as base model\n copy_on_model_validation: bool = True\n\n @classmethod\n def get_field_info(cls, name: str) -> Dict[str, Any]:\n \"\"\"\n Get properties of FieldInfo from the `fields` property of the config class.\n \"\"\"\n\n fields_value = cls.fields.get(name)\n\n if isinstance(fields_value, str):\n field_info: Dict[str, Any] = {'alias': fields_value}\n elif isinstance(fields_value, dict):\n field_info = fields_value\n else:\n field_info = {}\n\n if 'alias' in field_info:\n field_info.setdefault('alias_priority', 2)\n\n if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:\n alias = cls.alias_generator(name)\n if not isinstance(alias, str):\n raise TypeError(f'Config.alias_generator must return str, not {alias.__class__}')\n field_info.update(alias=alias, alias_priority=1)\n return field_info\n\n @classmethod\n def prepare_field(cls, field: 'ModelField') -> None:\n \"\"\"\n Optional hook to check or modify fields during model creation.\n \"\"\"\n pass\n\n\ndef inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':\n if not self_config:\n base_classes: Tuple['ConfigType', ...] = (parent_config,)\n elif self_config == parent_config:\n base_classes = (self_config,)\n else:\n base_classes = self_config, parent_config\n\n namespace['json_encoders'] = {\n **getattr(parent_config, 'json_encoders', {}),\n **getattr(self_config, 'json_encoders', {}),\n **namespace.get('json_encoders', {}),\n }\n\n return type('Config', base_classes, namespace)\n\n\nEXTRA_LINK = 'https://pydantic-docs.helpmanual.io/usage/model_config/'\n\n\ndef prepare_config(config: Type[BaseConfig], cls_name: str) -> None:\n if not isinstance(config.extra, Extra):\n try:\n config.extra = Extra(config.extra)\n except ValueError:\n raise ValueError(f'\"{cls_name}\": {config.extra} is not a valid value for \"extra\"')\n\n\ndef validate_custom_root_type(fields: Dict[str, ModelField]) -> None:\n if len(fields) > 1:\n raise ValueError(f'{ROOT_KEY} cannot be mixed with other fields')\n\n\ndef generate_hash_function(frozen: bool) -> Optional[Callable[[Any], int]]:\n def hash_function(self_: Any) -> int:\n return hash(self_.__class__) + hash(tuple(self_.__dict__.values()))\n\n return hash_function if frozen else None\n\n\n# If a field is of type `Callable`, its default value should be a function and cannot to ignored.\nANNOTATED_FIELD_UNTOUCHED_TYPES: Tuple[Any, ...] = (property, type, classmethod, staticmethod)\n# When creating a `BaseModel` instance, we bypass all the methods, properties... added to the model\nUNTOUCHED_TYPES: Tuple[Any, ...] = (FunctionType,) + ANNOTATED_FIELD_UNTOUCHED_TYPES\n# Note `ModelMetaclass` refers to `BaseModel`, but is also used to *create* `BaseModel`, so we need to add this extra\n# (somewhat hacky) boolean to keep track of whether we've created the `BaseModel` class yet, and therefore whether it's\n# safe to refer to it. 
If it *hasn't* been created, we assume that the `__new__` call we're in the middle of is for\n# the `BaseModel` class, since that's defined immediately after the metaclass.\n_is_base_model_class_defined = False\n\n\nclass ModelMetaclass(ABCMeta):\n @no_type_check # noqa C901\n def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901\n fields: Dict[str, ModelField] = {}\n config = BaseConfig\n validators: 'ValidatorListDict' = {}\n\n pre_root_validators, post_root_validators = [], []\n private_attributes: Dict[str, ModelPrivateAttr] = {}\n slots: SetStr = namespace.get('__slots__', ())\n slots = {slots} if isinstance(slots, str) else set(slots)\n class_vars: SetStr = set()\n hash_func: Optional[Callable[[Any], int]] = None\n\n for base in reversed(bases):\n if _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel:\n fields.update(smart_deepcopy(base.__fields__))\n config = inherit_config(base.__config__, config)\n validators = inherit_validators(base.__validators__, validators)\n pre_root_validators += base.__pre_root_validators__\n post_root_validators += base.__post_root_validators__\n private_attributes.update(base.__private_attributes__)\n class_vars.update(base.__class_vars__)\n hash_func = base.__hash__\n\n config_kwargs = {key: kwargs.pop(key) for key in kwargs.keys() & BaseConfig.__dict__.keys()}\n config_from_namespace = namespace.get('Config')\n if config_kwargs and config_from_namespace:\n raise TypeError('Specifying config in two places is ambiguous, use either Config attribute or class kwargs')\n config = inherit_config(config_from_namespace, config, **config_kwargs)\n\n validators = inherit_validators(extract_validators(namespace), validators)\n vg = ValidatorGroup(validators)\n\n for f in fields.values():\n f.set_config(config)\n extra_validators = vg.get_validators(f.name)\n if extra_validators:\n f.class_validators.update(extra_validators)\n # re-run prepare to add extra validators\n f.populate_validators()\n\n prepare_config(config, name)\n\n untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES\n\n def is_untouched(v: Any) -> bool:\n return isinstance(v, untouched_types) or v.__class__.__name__ == 'cython_function_or_method'\n\n if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):\n annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))\n # annotation only fields need to come first in fields\n for ann_name, ann_type in annotations.items():\n if is_classvar(ann_type):\n class_vars.add(ann_name)\n elif is_valid_field(ann_name):\n validate_field_name(bases, ann_name)\n value = namespace.get(ann_name, Undefined)\n allowed_types = get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,)\n if (\n is_untouched(value)\n and ann_type != PyObject\n and not any(\n lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types\n )\n ):\n continue\n fields[ann_name] = ModelField.infer(\n name=ann_name,\n value=value,\n annotation=ann_type,\n class_validators=vg.get_validators(ann_name),\n config=config,\n )\n elif ann_name not in namespace and config.underscore_attrs_are_private:\n private_attributes[ann_name] = PrivateAttr()\n\n untouched_types = UNTOUCHED_TYPES + config.keep_untouched\n for var_name, value in namespace.items():\n can_be_changed = var_name not in class_vars and not is_untouched(value)\n if isinstance(value, ModelPrivateAttr):\n if not is_valid_private_name(var_name):\n raise NameError(\n f'Private 
attributes \"{var_name}\" must not be a valid field name; '\n f'Use sunder or dunder names, e. g. \"_{var_name}\" or \"__{var_name}__\"'\n )\n private_attributes[var_name] = value\n elif config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed:\n private_attributes[var_name] = PrivateAttr(default=value)\n elif is_valid_field(var_name) and var_name not in annotations and can_be_changed:\n validate_field_name(bases, var_name)\n inferred = ModelField.infer(\n name=var_name,\n value=value,\n annotation=annotations.get(var_name, Undefined),\n class_validators=vg.get_validators(var_name),\n config=config,\n )\n if var_name in fields and inferred.type_ != fields[var_name].type_:\n raise TypeError(\n f'The type of {name}.{var_name} differs from the new default value; '\n f'if you wish to change the type of this field, please use a type annotation'\n )\n fields[var_name] = inferred\n\n _custom_root_type = ROOT_KEY in fields\n if _custom_root_type:\n validate_custom_root_type(fields)\n vg.check_for_unused()\n if config.json_encoders:\n json_encoder = partial(custom_pydantic_encoder, config.json_encoders)\n else:\n json_encoder = pydantic_encoder\n pre_rv_new, post_rv_new = extract_root_validators(namespace)\n\n if hash_func is None:\n hash_func = generate_hash_function(config.frozen)\n\n exclude_from_namespace = fields | private_attributes.keys() | {'__slots__'}\n new_namespace = {\n '__config__': config,\n '__fields__': fields,\n '__validators__': vg.validators,\n '__pre_root_validators__': unique_list(pre_root_validators + pre_rv_new),\n '__post_root_validators__': unique_list(post_root_validators + post_rv_new),\n '__schema_cache__': {},\n '__json_encoder__': staticmethod(json_encoder),\n '__custom_root_type__': _custom_root_type,\n '__private_attributes__': private_attributes,\n '__slots__': slots | private_attributes.keys(),\n '__hash__': hash_func,\n '__class_vars__': class_vars,\n **{n: v for n, v in namespace.items() if n not in exclude_from_namespace},\n }\n\n cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)\n # set __signature__ attr only for model class, but not for its instances\n cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config))\n return cls\n\n\nobject_setattr = object.__setattr__\n\n\nclass BaseModel(Representation, metaclass=ModelMetaclass):\n if TYPE_CHECKING:\n # populated by the metaclass, defined here to help IDEs only\n __fields__: Dict[str, ModelField] = {}\n __validators__: Dict[str, AnyCallable] = {}\n __pre_root_validators__: List[AnyCallable]\n __post_root_validators__: List[Tuple[bool, AnyCallable]]\n __config__: Type[BaseConfig] = BaseConfig\n __root__: Any = None\n __json_encoder__: Callable[[Any], Any] = lambda x: x\n __schema_cache__: 'DictAny' = {}\n __custom_root_type__: bool = False\n __signature__: 'Signature'\n __private_attributes__: Dict[str, Any]\n __class_vars__: SetStr\n __fields_set__: SetStr = set()\n\n Config = BaseConfig\n __slots__ = ('__dict__', '__fields_set__')\n __doc__ = '' # Null out the Representation docstring\n\n def __init__(__pydantic_self__, **data: Any) -> None:\n \"\"\"\n Create a new model by parsing and validating input data from keyword arguments.\n\n Raises ValidationError if the input data cannot be parsed to form a valid model.\n \"\"\"\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)\n if 
validation_error:\n raise validation_error\n try:\n object_setattr(__pydantic_self__, '__dict__', values)\n except TypeError as e:\n raise TypeError(\n 'Model values must be a dict; you may not have returned a dictionary from a root validator'\n ) from e\n object_setattr(__pydantic_self__, '__fields_set__', fields_set)\n __pydantic_self__._init_private_attributes()\n\n @no_type_check\n def __setattr__(self, name, value): # noqa: C901 (ignore complexity)\n if name in self.__private_attributes__:\n return object_setattr(self, name, value)\n\n if self.__config__.extra is not Extra.allow and name not in self.__fields__:\n raise ValueError(f'\"{self.__class__.__name__}\" object has no field \"{name}\"')\n elif not self.__config__.allow_mutation or self.__config__.frozen:\n raise TypeError(f'\"{self.__class__.__name__}\" is immutable and does not support item assignment')\n elif self.__config__.validate_assignment:\n new_values = {**self.__dict__, name: value}\n\n for validator in self.__pre_root_validators__:\n try:\n new_values = validator(self.__class__, new_values)\n except (ValueError, TypeError, AssertionError) as exc:\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], self.__class__)\n\n known_field = self.__fields__.get(name, None)\n if known_field:\n # We want to\n # - make sure validators are called without the current value for this field inside `values`\n # - keep other values (e.g. submodels) untouched (using `BaseModel.dict()` will change them into dicts)\n # - keep the order of the fields\n if not known_field.field_info.allow_mutation:\n raise TypeError(f'\"{known_field.name}\" has allow_mutation set to False and cannot be assigned')\n dict_without_original_value = {k: v for k, v in self.__dict__.items() if k != name}\n value, error_ = known_field.validate(value, dict_without_original_value, loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], self.__class__)\n else:\n new_values[name] = value\n\n errors = []\n for skip_on_failure, validator in self.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n new_values = validator(self.__class__, new_values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n if errors:\n raise ValidationError(errors, self.__class__)\n\n # update the whole __dict__ as other values than just `value`\n # may be changed (e.g. 
with `root_validator`)\n object_setattr(self, '__dict__', new_values)\n else:\n self.__dict__[name] = value\n\n self.__fields_set__.add(name)\n\n def __getstate__(self) -> 'DictAny':\n private_attrs = ((k, getattr(self, k, Undefined)) for k in self.__private_attributes__)\n return {\n '__dict__': self.__dict__,\n '__fields_set__': self.__fields_set__,\n '__private_attribute_values__': {k: v for k, v in private_attrs if v is not Undefined},\n }\n\n def __setstate__(self, state: 'DictAny') -> None:\n object_setattr(self, '__dict__', state['__dict__'])\n object_setattr(self, '__fields_set__', state['__fields_set__'])\n for name, value in state.get('__private_attribute_values__', {}).items():\n object_setattr(self, name, value)\n\n def _init_private_attributes(self) -> None:\n for name, private_attr in self.__private_attributes__.items():\n default = private_attr.get_default()\n if default is not Undefined:\n object_setattr(self, name, default)\n\n def dict(\n self,\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'DictStrAny':\n \"\"\"\n Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.\n\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.dict(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n\n return dict(\n self._iter(\n to_dict=True,\n by_alias=by_alias,\n include=include,\n exclude=exclude,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n )\n\n def json(\n self,\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n encoder: Optional[Callable[[Any], Any]] = None,\n **dumps_kwargs: Any,\n ) -> str:\n \"\"\"\n Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.\n\n `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.json(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)\n data = self.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if self.__custom_root_type__:\n data = data[ROOT_KEY]\n return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)\n\n @classmethod\n def _enforce_dict_if_root(cls, obj: Any) -> Any:\n if cls.__custom_root_type__ and (\n not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY})\n or cls.__fields__[ROOT_KEY].shape in MAPPING_LIKE_SHAPES\n ):\n return {ROOT_KEY: obj}\n else:\n return obj\n\n @classmethod\n def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':\n obj = cls._enforce_dict_if_root(obj)\n if not isinstance(obj, dict):\n try:\n obj = dict(obj)\n except (TypeError, ValueError) as e:\n exc 
= TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}')\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e\n return cls(**obj)\n\n @classmethod\n def parse_raw(\n cls: Type['Model'],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n try:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n except (ValueError, TypeError, UnicodeDecodeError) as e:\n raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)\n return cls.parse_obj(obj)\n\n @classmethod\n def parse_file(\n cls: Type['Model'],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n return cls.parse_obj(obj)\n\n @classmethod\n def from_orm(cls: Type['Model'], obj: Any) -> 'Model':\n if not cls.__config__.orm_mode:\n raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')\n obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj)\n m = cls.__new__(cls)\n values, fields_set, validation_error = validate_model(cls, obj)\n if validation_error:\n raise validation_error\n object_setattr(m, '__dict__', values)\n object_setattr(m, '__fields_set__', fields_set)\n m._init_private_attributes()\n return m\n\n @classmethod\n def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':\n \"\"\"\n Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.\n Default values are respected, but no other validation is performed.\n Behaves as if `Config.extra = 'allow'` was set since it adds all passed values\n \"\"\"\n m = cls.__new__(cls)\n fields_values: Dict[str, Any] = {}\n for name, field in cls.__fields__.items():\n if name in values:\n fields_values[name] = values[name]\n elif not field.required:\n fields_values[name] = field.get_default()\n fields_values.update(values)\n object_setattr(m, '__dict__', fields_values)\n if _fields_set is None:\n _fields_set = set(values.keys())\n object_setattr(m, '__fields_set__', _fields_set)\n m._init_private_attributes()\n return m\n\n def copy(\n self: 'Model',\n *,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n update: 'DictStrAny' = None,\n deep: bool = False,\n ) -> 'Model':\n \"\"\"\n Duplicate a model, optionally choose which fields to include, exclude and change.\n\n :param include: fields to include in new model\n :param exclude: fields to exclude from new model, as with values this takes precedence over include\n :param update: values to change/add in the new model. 
Note: the data is not validated before creating\n the new model: you should trust this data\n :param deep: set to `True` to make a deep copy of the model\n :return: new model instance\n \"\"\"\n\n v = dict(\n self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False),\n **(update or {}),\n )\n\n if deep:\n # chances of having empty dict here are quite low for using smart_deepcopy\n v = deepcopy(v)\n\n cls = self.__class__\n m = cls.__new__(cls)\n object_setattr(m, '__dict__', v)\n # new `__fields_set__` can have unset optional fields with a set value in `update` kwarg\n if update:\n fields_set = self.__fields_set__ | update.keys()\n else:\n fields_set = set(self.__fields_set__)\n object_setattr(m, '__fields_set__', fields_set)\n for name in self.__private_attributes__:\n value = getattr(self, name, Undefined)\n if value is not Undefined:\n if deep:\n value = deepcopy(value)\n object_setattr(m, name, value)\n\n return m\n\n @classmethod\n def schema(cls, by_alias: bool = True, ref_template: str = default_ref_template) -> 'DictStrAny':\n cached = cls.__schema_cache__.get((by_alias, ref_template))\n if cached is not None:\n return cached\n s = model_schema(cls, by_alias=by_alias, ref_template=ref_template)\n cls.__schema_cache__[(by_alias, ref_template)] = s\n return s\n\n @classmethod\n def schema_json(\n cls, *, by_alias: bool = True, ref_template: str = default_ref_template, **dumps_kwargs: Any\n ) -> str:\n from .json import pydantic_encoder\n\n return cls.__config__.json_dumps(\n cls.schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs\n )\n\n @classmethod\n def __get_validators__(cls) -> 'CallableGenerator':\n yield cls.validate\n\n @classmethod\n def validate(cls: Type['Model'], value: Any) -> 'Model':\n if isinstance(value, cls):\n return value.copy() if cls.__config__.copy_on_model_validation else value\n\n value = cls._enforce_dict_if_root(value)\n if isinstance(value, dict):\n return cls(**value)\n elif cls.__config__.orm_mode:\n return cls.from_orm(value)\n else:\n try:\n value_as_dict = dict(value)\n except (TypeError, ValueError) as e:\n raise DictError() from e\n return cls(**value_as_dict)\n\n @classmethod\n def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:\n return cls.__config__.getter_dict(obj)\n\n @classmethod\n @no_type_check\n def _get_value(\n cls,\n v: Any,\n to_dict: bool,\n by_alias: bool,\n include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude_unset: bool,\n exclude_defaults: bool,\n exclude_none: bool,\n ) -> Any:\n\n if isinstance(v, BaseModel):\n if to_dict:\n v_dict = v.dict(\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=include,\n exclude=exclude,\n exclude_none=exclude_none,\n )\n if ROOT_KEY in v_dict:\n return v_dict[ROOT_KEY]\n return v_dict\n else:\n return v.copy(include=include, exclude=exclude)\n\n value_exclude = ValueItems(v, exclude) if exclude else None\n value_include = ValueItems(v, include) if include else None\n\n if isinstance(v, dict):\n return {\n k_: cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(k_),\n exclude=value_exclude and value_exclude.for_element(k_),\n exclude_none=exclude_none,\n )\n for k_, v_ in v.items()\n if (not value_exclude or not 
value_exclude.is_excluded(k_))\n and (not value_include or value_include.is_included(k_))\n }\n\n elif sequence_like(v):\n seq_args = (\n cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(i),\n exclude=value_exclude and value_exclude.for_element(i),\n exclude_none=exclude_none,\n )\n for i, v_ in enumerate(v)\n if (not value_exclude or not value_exclude.is_excluded(i))\n and (not value_include or value_include.is_included(i))\n )\n\n return v.__class__(*seq_args) if is_namedtuple(v.__class__) else v.__class__(seq_args)\n\n elif isinstance(v, Enum) and getattr(cls.Config, 'use_enum_values', False):\n return v.value\n\n else:\n return v\n\n @classmethod\n def update_forward_refs(cls, **localns: Any) -> None:\n \"\"\"\n Try to update ForwardRefs on fields based on this Model, globalns and localns.\n \"\"\"\n globalns = sys.modules[cls.__module__].__dict__.copy()\n globalns.setdefault(cls.__name__, cls)\n for f in cls.__fields__.values():\n update_field_forward_refs(f, globalns=globalns, localns=localns)\n\n def __iter__(self) -> 'TupleGenerator':\n \"\"\"\n so `dict(model)` works\n \"\"\"\n yield from self.__dict__.items()\n\n def _iter(\n self,\n to_dict: bool = False,\n by_alias: bool = False,\n include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'TupleGenerator':\n\n allowed_keys = self._calculate_keys(include=include, exclude=exclude, exclude_unset=exclude_unset)\n if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none):\n # huge boost for plain _iter()\n yield from self.__dict__.items()\n return\n\n value_exclude = ValueItems(self, exclude) if exclude else None\n value_include = ValueItems(self, include) if include else None\n\n for field_key, v in self.__dict__.items():\n if (allowed_keys is not None and field_key not in allowed_keys) or (exclude_none and v is None):\n continue\n\n if exclude_defaults:\n model_field = self.__fields__.get(field_key)\n if not getattr(model_field, 'required', True) and getattr(model_field, 'default', _missing) == v:\n continue\n\n if by_alias and field_key in self.__fields__:\n dict_key = self.__fields__[field_key].alias\n else:\n dict_key = field_key\n\n if to_dict or value_include or value_exclude:\n v = self._get_value(\n v,\n to_dict=to_dict,\n by_alias=by_alias,\n include=value_include and value_include.for_element(field_key),\n exclude=value_exclude and value_exclude.for_element(field_key),\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n yield dict_key, v\n\n def _calculate_keys(\n self,\n include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']],\n exclude_unset: bool,\n update: Optional['DictStrAny'] = None,\n ) -> Optional[AbstractSet[str]]:\n if include is None and exclude is None and exclude_unset is False:\n return None\n\n keys: AbstractSet[str]\n if exclude_unset:\n keys = self.__fields_set__.copy()\n else:\n keys = self.__dict__.keys()\n\n if include is not None:\n if isinstance(include, Mapping):\n keys &= include.keys()\n else:\n keys &= include\n\n if update:\n keys -= update.keys()\n\n if exclude:\n if isinstance(exclude, Mapping):\n keys -= {k for 
k, v in exclude.items() if v is ...}\n else:\n keys -= exclude\n\n return keys\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, BaseModel):\n return self.dict() == other.dict()\n else:\n return self.dict() == other\n\n def __repr_args__(self) -> 'ReprArgs':\n return self.__dict__.items() # type: ignore\n\n\n_is_base_model_class_defined = True\n\n\ndef create_model(\n __model_name: str,\n *,\n __config__: Type[BaseConfig] = None,\n __base__: Type['Model'] = None,\n __module__: str = __name__,\n __validators__: Dict[str, classmethod] = None,\n **field_definitions: Any,\n) -> Type['Model']:\n \"\"\"\n Dynamically create a model.\n :param __model_name: name of the created model\n :param __config__: config class to use for the new model\n :param __base__: base class for the new model to inherit from\n :param __module__: module of the created model\n :param __validators__: a dict of method names and @validator class methods\n :param field_definitions: fields of the model (or extra fields if a base is supplied)\n in the format `<name>=(<type>, <default default>)` or `<name>=<default value>, e.g.\n `foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format\n `<name>=<FieldInfo>`, e.g. `foo=Field(default_factory=datetime.utcnow, alias='bar')`\n \"\"\"\n\n if __base__ is not None:\n if __config__ is not None:\n raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')\n else:\n __base__ = cast(Type['Model'], BaseModel)\n\n fields = {}\n annotations = {}\n\n for f_name, f_def in field_definitions.items():\n if not is_valid_field(f_name):\n warnings.warn(f'fields may not start with an underscore, ignoring \"{f_name}\"', RuntimeWarning)\n if isinstance(f_def, tuple):\n try:\n f_annotation, f_value = f_def\n except ValueError as e:\n raise ConfigError(\n 'field definitions should either be a tuple of (<type>, <default>) or just a '\n 'default value, unfortunately this means tuples as '\n 'default values are not allowed'\n ) from e\n else:\n f_annotation, f_value = None, f_def\n\n if f_annotation:\n annotations[f_name] = f_annotation\n fields[f_name] = f_value\n\n namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}\n if __validators__:\n namespace.update(__validators__)\n namespace.update(fields)\n if __config__:\n namespace['Config'] = inherit_config(__config__, BaseConfig)\n\n return type(__model_name, (__base__,), namespace)\n\n\n_missing = object()\n\n\ndef validate_model( # noqa: C901 (ignore complexity)\n model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None\n) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:\n \"\"\"\n validate data against a model.\n \"\"\"\n values = {}\n errors = []\n # input_data names, possibly alias\n names_used = set()\n # field names, never aliases\n fields_set = set()\n config = model.__config__\n check_extra = config.extra is not Extra.ignore\n cls_ = cls or model\n\n for validator in model.__pre_root_validators__:\n try:\n input_data = validator(cls_, input_data)\n except (ValueError, TypeError, AssertionError) as exc:\n return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)\n\n for name, field in model.__fields__.items():\n value = input_data.get(field.alias, _missing)\n using_name = False\n if value is _missing and config.allow_population_by_field_name and field.alt_alias:\n value = input_data.get(field.name, _missing)\n using_name = True\n\n if value is _missing:\n if field.required:\n 
errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n continue\n\n value = field.get_default()\n\n if not config.validate_all and not field.validate_always:\n values[name] = value\n continue\n else:\n fields_set.add(name)\n if check_extra:\n names_used.add(field.name if using_name else field.alias)\n\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[name] = v_\n\n if check_extra:\n if isinstance(input_data, GetterDict):\n extra = input_data.extra_keys() - names_used\n else:\n extra = input_data.keys() - names_used\n if extra:\n fields_set |= extra\n if config.extra is Extra.allow:\n for f in extra:\n values[f] = input_data[f]\n else:\n for f in sorted(extra):\n errors.append(ErrorWrapper(ExtraError(), loc=f))\n\n for skip_on_failure, validator in model.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n values = validator(cls_, values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n\n if errors:\n return values, fields_set, ValidationError(errors, cls_)\n else:\n return values, fields_set, None\n",
"path": "pydantic/main.py"
}
] | diff --git a/changes/2521-layday.md b/changes/2521-layday.md
new file mode 100644
index 00000000000..3c02f4dfd60
--- /dev/null
+++ b/changes/2521-layday.md
@@ -0,0 +1 @@
+Allow passing `json_encoders` in class kwargs
diff --git a/pydantic/main.py b/pydantic/main.py
index f6aca41048f..37f005af87d 100644
--- a/pydantic/main.py
+++ b/pydantic/main.py
@@ -184,6 +184,7 @@ def inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **nam
namespace['json_encoders'] = {
**getattr(parent_config, 'json_encoders', {}),
**getattr(self_config, 'json_encoders', {}),
+ **namespace.get('json_encoders', {}),
}
return type('Config', base_classes, namespace)
diff --git a/tests/test_main.py b/tests/test_main.py
index 328e89a2b32..d2180c9e735 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1723,6 +1723,13 @@ class Model(Base, extra='allow'):
assert Model.__fields__['b'].alias == 'B' # alias_generator still works
+def test_class_kwargs_config_json_encoders():
+ class Model(BaseModel, json_encoders={int: str}):
+ pass
+
+ assert Model.__config__.json_encoders == {int: str}
+
+
def test_class_kwargs_config_and_attr_conflict():
with pytest.raises(
|
pypa__pipenv-2975 | pipenv graph fails: No module named 'pipenv'
### Issue description
With the newest version of pipenv (2018.10.9), `pipenv graph` fails with the error message `ModuleNotFoundError: No module named 'pipenv'`
### Expected result
I expected `pipenv graph` to output the dependency graph of the venv.
### Actual result
```
$ pipenv graph
ERROR: Traceback (most recent call last):
File "c:\\python27\\lib\\site-packages\\pipenv\\vendor\\pipdeptree.py", line 16, in <module>
from pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement
ModuleNotFoundError: No module named 'pipenv'
```
### Steps to replicate
- Install version 2018.10.9 of pipenv.
- Run `pipenv graph`
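
The failing import is the absolute `from pipenv.vendor.pip_shims import ...` at the top of the vendored `pipdeptree.py`: `pipenv graph` executes that script with the project's virtualenv interpreter, and the directory containing the `pipenv` package is not on that interpreter's `sys.path`, so the import cannot be resolved. Below is a minimal sketch of the kind of path adjustment that makes the import resolvable (it mirrors the fix shown in the diff further below; the directory arithmetic assumes the file lives at `<site-packages>/pipenv/vendor/pipdeptree.py`):

```python
import os
import sys

# pipdeptree.py sits at <site-packages>/pipenv/vendor/pipdeptree.py, so three
# dirname() calls up from the file yield the site-packages directory that
# contains the `pipenv` package. Appending it lets the absolute import succeed
# even when the script is run with the project's own virtualenv interpreter.
pardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(pardir)

from pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement  # noqa: E402
```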
-------------------------------------------------------------------------------
<details><summary>$ pipenv --support</summary>
Pipenv version: `'2018.10.9'`
Pipenv location: `'c:\\python27\\lib\\site-packages\\pipenv'`
Python location: `'c:\\python27\\python.exe'`
Python installations found:
- `3.6.3`: `C:\Python36\python.exe`
- `2.7`: `C:\Python27\python.exe`
- `2.7`: `C:\Users\m.manhertz\.windows-build-tools\python27\python.exe`
- `2.7`: `C:\Users\m.manhertz\Envs\tpe\Scripts\python.exe`
- `3.7.0`: `C:\Python37\python.exe`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '0',
'os_name': 'nt',
'platform_machine': 'AMD64',
'platform_python_implementation': 'CPython',
'platform_release': '10',
'platform_system': 'Windows',
'platform_version': '10.0.17134',
'python_full_version': '2.7.12',
'python_version': '2.7',
'sys_platform': 'win32'}
```
System environment variables:
- `TMP`
- `TPE_DB_PASSWORD`
- `COMPUTERNAME`
- `VS140COMNTOOLS`
- `USERDOMAIN`
- `TPE_DB_HOST`
- `PSMODULEPATH`
- `PYTHONDONTWRITEBYTECODE`
- `COMMONPROGRAMFILES`
- `PROCESSOR_IDENTIFIER`
- `VBOX_MSI_INSTALL_PATH`
- `PROGRAMFILES`
- `PROCESSOR_REVISION`
- `HOME`
- `SYSTEMROOT`
- `PROGRAMFILES(X86)`
- `COMSPEC`
- `DRIVERDATA`
- `TERM`
- `DJANGO_SETTINGS_MODULE`
- `TEMP`
- `ALLUSERSPROFILE`
- `GITHUB_POSH_GIT`
- `TVT`
- `COMMONPROGRAMFILES(X86)`
- `TPE_DB_NAME`
- `PROCESSOR_ARCHITECTURE`
- `PLINK_PROTOCOL`
- `EDITOR`
- `LOCALAPPDATA`
- `GYP_MSVS_VERSION`
- `HOMEPATH`
- `USERDOMAIN_ROAMINGPROFILE`
- `TPE_SECRET_KEY`
- `ERLANG_HOME`
- `USERNAME`
- `WORKON_HOME`
- `LOGONSERVER`
- `SESSIONNAME`
- `PROGRAMDATA`
- `PYTHONPATH`
- `ONEDRIVE`
- `PATH`
- `PIP_SHIMS_BASE_MODULE`
- `TPE_DB_USER`
- `AWE_DIR`
- `PATHEXT`
- `PIP_PYTHON_PATH`
- `WINDIR`
- `APPDATA`
- `HOMEDRIVE`
- `PROGRAMW6432`
- `SYSTEMDRIVE`
- `NUMBER_OF_PROCESSORS`
- `USERDNSDOMAIN`
- `PROCESSOR_LEVEL`
- `VCTARGETSPATH`
- `GETTEXTCLDRDIR`
- `PYTHON_HOME`
- `GITHUB_GIT`
- `COMMONPROGRAMW6432`
- `OS`
- `PUBLIC`
- `USERPROFILE`
Pipenv-specific environment variables:
Debug-specific environment variables:
- `PATH`: `C:\Program Files\Docker\Docker\Resources\bin;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;C:\Program Files (x86)\Intel\iCLS Client\;C:\ProgramData\Oracle\Java\javapath;C:\Program Files\Intel\iCLS Client\;C:\Python27\;C:\Python27\Scripts;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem;C:\WINDOWS\System32\WindowsPowerShell\v1.0\;c:\Python27\;c:\Python27\Scripts;C:\Program Files\PostgreSQL\9.4\bin;C:\Program Files\PostgreSQL\9.4\lib;C:\Program Files\PostgreSQL\9.4\include;C:\Program Files\Redis\;C:\Program Files (x86)\PuTTY\;C:\HashiCorp\Vagrant\bin;C:\Program Files\Git\cmd;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files\nodejs\;C:\Program Files (x86)\Windows Kits\10\Windows Performance Toolkit\;C:\Program Files\Intel\WiFi\bin\;C:\Program Files\Common Files\Intel\WirelessCommon\;C:\WINDOWS\System32\OpenSSH\;C:\Users\m.manhertz\AppData\Local\Microsoft\WindowsApps;C:\Users\m.manhertz\Documents\Tools;C:\Users\m.manhertz\AppData\Local\atom\bin;C:\Program Files\gettext-iconv\bin;C:\Program Files (x86)\Sophos\Sophos SSL VPN Client\bin;C:\Users\m.manhertz\AppData\Local\Microsoft\WindowsApps;C:\Users\m.manhertz\AppData\Roaming\npm;C:\Python36\Scripts\;C:\Program Files\PostgreSQL\9.6\bin;C:\Program Files\Intel\WiFi\bin\;C:\Program Files\Common Files\Intel\WirelessCommon\;;C:\Users\m.manhertz\AppData\Local\GitHub\PortableGit_f02737a78695063deace08e96d5042710d3e32db\cmd;C:\Users\m.manhertz\AppData\Local\GitHub\PortableGit_f02737a78695063deace08e96d5042710d3e32db\usr\bin;C:\Users\m.manhertz\AppData\Local\GitHub\PortableGit_f02737a78695063deace08e96d5042710d3e32db\usr\share\git-tfs;C:\Users\m.manhertz\AppData\Local\GitHub\lfs-amd64_1.5.5;C:\Users\m.manhertz\AppData\Local\Apps\2.0\OOH24QXT.R8H\XWTJVPKY.DW1\gith..tion_317444273a93ac29_0003.0003_5794af8169eeff14;C:\Windows\Microsoft.NET\Framework\v4.0.30319\;c:\python27\lib\site-packages\pywin32_system32`
- `EDITOR`: `GitPad`
---------------------------
Contents of `Pipfile` ('C:\\Users\\m.manhertz\\Documents\\GitHub\\demo\\Pipfile'):
```toml
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[dev-packages]
[packages]
[requires]
python_version = "3.7"
```
Contents of `Pipfile.lock` ('C:\\Users\\m.manhertz\\Documents\\GitHub\\demo\\Pipfile.lock'):
```json
{
"_meta": {
"hash": {
"sha256": "7e7ef69da7248742e869378f8421880cf8f0017f96d94d086813baa518a65489"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.7"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {},
"develop": {}
}
```
</details>
| [
{
"content": "from __future__ import print_function\nimport os\nimport sys\nfrom itertools import chain\nfrom collections import defaultdict\nimport argparse\nfrom operator import attrgetter\nimport json\nfrom importlib import import_module\n\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement\n\nimport pkg_resources\n# inline:\n# from graphviz import backend, Digraph\n\n\n__version__ = '0.13.0'\n\n\nflatten = chain.from_iterable\n\n\ndef build_dist_index(pkgs):\n \"\"\"Build an index pkgs by their key as a dict.\n\n :param list pkgs: list of pkg_resources.Distribution instances\n :returns: index of the pkgs by the pkg key\n :rtype: dict\n\n \"\"\"\n return dict((p.key, DistPackage(p)) for p in pkgs)\n\n\ndef construct_tree(index):\n \"\"\"Construct tree representation of the pkgs from the index.\n\n The keys of the dict representing the tree will be objects of type\n DistPackage and the values will be list of ReqPackage objects.\n\n :param dict index: dist index ie. index of pkgs by their keys\n :returns: tree of pkgs and their dependencies\n :rtype: dict\n\n \"\"\"\n return dict((p, [ReqPackage(r, index.get(r.key))\n for r in p.requires()])\n for p in index.values())\n\n\ndef sorted_tree(tree):\n \"\"\"Sorts the dict representation of the tree\n\n The root packages as well as the intermediate packages are sorted\n in the alphabetical order of the package names.\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :returns: sorted tree\n :rtype: collections.OrderedDict\n\n \"\"\"\n return OrderedDict(sorted([(k, sorted(v, key=attrgetter('key')))\n for k, v in tree.items()],\n key=lambda kv: kv[0].key))\n\n\ndef find_tree_root(tree, key):\n \"\"\"Find a root in a tree by it's key\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :param str key: key of the root node to find\n :returns: a root node if found else None\n :rtype: mixed\n\n \"\"\"\n result = [p for p in tree.keys() if p.key == key]\n assert len(result) in [0, 1]\n return None if len(result) == 0 else result[0]\n\n\ndef reverse_tree(tree):\n \"\"\"Reverse the dependency tree.\n\n ie. 
the keys of the resulting dict are objects of type\n ReqPackage and the values are lists of DistPackage objects.\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :returns: reversed tree\n :rtype: dict\n\n \"\"\"\n rtree = defaultdict(list)\n child_keys = set(c.key for c in flatten(tree.values()))\n for k, vs in tree.items():\n for v in vs:\n node = find_tree_root(rtree, v.key) or v\n rtree[node].append(k.as_required_by(v))\n if k.key not in child_keys:\n rtree[k.as_requirement()] = []\n return rtree\n\n\ndef guess_version(pkg_key, default='?'):\n \"\"\"Guess the version of a pkg when pip doesn't provide it\n\n :param str pkg_key: key of the package\n :param str default: default version to return if unable to find\n :returns: version\n :rtype: string\n\n \"\"\"\n try:\n m = import_module(pkg_key)\n except ImportError:\n return default\n else:\n return getattr(m, '__version__', default)\n\n\nclass Package(object):\n \"\"\"Abstract class for wrappers around objects that pip returns.\n\n This class needs to be subclassed with implementations for\n `render_as_root` and `render_as_branch` methods.\n\n \"\"\"\n\n def __init__(self, obj):\n self._obj = obj\n self.project_name = obj.project_name\n self.key = obj.key\n\n def render_as_root(self, frozen):\n return NotImplementedError\n\n def render_as_branch(self, frozen):\n return NotImplementedError\n\n def render(self, parent=None, frozen=False):\n if not parent:\n return self.render_as_root(frozen)\n else:\n return self.render_as_branch(frozen)\n\n @staticmethod\n def frozen_repr(obj):\n fr = FrozenRequirement.from_dist(obj, [])\n return str(fr).strip()\n\n def __getattr__(self, key):\n return getattr(self._obj, key)\n\n def __repr__(self):\n return '<{0}(\"{1}\")>'.format(self.__class__.__name__, self.key)\n\n\nclass DistPackage(Package):\n \"\"\"Wrapper class for pkg_resources.Distribution instances\n\n :param obj: pkg_resources.Distribution to wrap over\n :param req: optional ReqPackage object to associate this\n DistPackage with. 
This is useful for displaying the\n tree in reverse\n \"\"\"\n\n def __init__(self, obj, req=None):\n super(DistPackage, self).__init__(obj)\n self.version_spec = None\n self.req = req\n\n def render_as_root(self, frozen):\n if not frozen:\n return '{0}=={1}'.format(self.project_name, self.version)\n else:\n return self.__class__.frozen_repr(self._obj)\n\n def render_as_branch(self, frozen):\n assert self.req is not None\n if not frozen:\n parent_ver_spec = self.req.version_spec\n parent_str = self.req.project_name\n if parent_ver_spec:\n parent_str += parent_ver_spec\n return (\n '{0}=={1} [requires: {2}]'\n ).format(self.project_name, self.version, parent_str)\n else:\n return self.render_as_root(frozen)\n\n def as_requirement(self):\n \"\"\"Return a ReqPackage representation of this DistPackage\"\"\"\n return ReqPackage(self._obj.as_requirement(), dist=self)\n\n def as_required_by(self, req):\n \"\"\"Return a DistPackage instance associated to a requirement\n\n This association is necessary for displaying the tree in\n reverse.\n\n :param ReqPackage req: the requirement to associate with\n :returns: DistPackage instance\n\n \"\"\"\n return self.__class__(self._obj, req)\n\n def as_dict(self):\n return {'key': self.key,\n 'package_name': self.project_name,\n 'installed_version': self.version}\n\n\nclass ReqPackage(Package):\n \"\"\"Wrapper class for Requirements instance\n\n :param obj: The `Requirements` instance to wrap over\n :param dist: optional `pkg_resources.Distribution` instance for\n this requirement\n \"\"\"\n\n UNKNOWN_VERSION = '?'\n\n def __init__(self, obj, dist=None):\n super(ReqPackage, self).__init__(obj)\n self.dist = dist\n\n @property\n def version_spec(self):\n specs = sorted(self._obj.specs, reverse=True) # `reverse` makes '>' prior to '<'\n return ','.join([''.join(sp) for sp in specs]) if specs else None\n\n @property\n def installed_version(self):\n if not self.dist:\n return guess_version(self.key, self.UNKNOWN_VERSION)\n return self.dist.version\n\n def is_conflicting(self):\n \"\"\"If installed version conflicts with required version\"\"\"\n # unknown installed version is also considered conflicting\n if self.installed_version == self.UNKNOWN_VERSION:\n return True\n ver_spec = (self.version_spec if self.version_spec else '')\n req_version_str = '{0}{1}'.format(self.project_name, ver_spec)\n req_obj = pkg_resources.Requirement.parse(req_version_str)\n return self.installed_version not in req_obj\n\n def render_as_root(self, frozen):\n if not frozen:\n return '{0}=={1}'.format(self.project_name, self.installed_version)\n elif self.dist:\n return self.__class__.frozen_repr(self.dist._obj)\n else:\n return self.project_name\n\n def render_as_branch(self, frozen):\n if not frozen:\n req_ver = self.version_spec if self.version_spec else 'Any'\n return (\n '{0} [required: {1}, installed: {2}]'\n ).format(self.project_name, req_ver, self.installed_version)\n else:\n return self.render_as_root(frozen)\n\n def as_dict(self):\n return {'key': self.key,\n 'package_name': self.project_name,\n 'installed_version': self.installed_version,\n 'required_version': self.version_spec}\n\n\ndef render_tree(tree, list_all=True, show_only=None, frozen=False, exclude=None):\n \"\"\"Convert tree to string representation\n\n :param dict tree: the package tree\n :param bool list_all: whether to list all the pgks at the root\n level or only those that are the\n sub-dependencies\n :param set show_only: set of select packages to be shown in the\n output. 
This is optional arg, default: None.\n :param bool frozen: whether or not show the names of the pkgs in\n the output that's favourable to pip --freeze\n :param set exclude: set of select packages to be excluded from the\n output. This is optional arg, default: None.\n :returns: string representation of the tree\n :rtype: str\n\n \"\"\"\n tree = sorted_tree(tree)\n branch_keys = set(r.key for r in flatten(tree.values()))\n nodes = tree.keys()\n use_bullets = not frozen\n\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n\n if show_only:\n nodes = [p for p in nodes\n if p.key in show_only or p.project_name in show_only]\n elif not list_all:\n nodes = [p for p in nodes if p.key not in branch_keys]\n\n def aux(node, parent=None, indent=0, chain=None):\n if exclude and (node.key in exclude or node.project_name in exclude):\n return []\n if chain is None:\n chain = [node.project_name]\n node_str = node.render(parent, frozen)\n if parent:\n prefix = ' '*indent + ('- ' if use_bullets else '')\n node_str = prefix + node_str\n result = [node_str]\n children = [aux(c, node, indent=indent+2,\n chain=chain+[c.project_name])\n for c in get_children(node)\n if c.project_name not in chain]\n result += list(flatten(children))\n return result\n\n lines = flatten([aux(p) for p in nodes])\n return '\\n'.join(lines)\n\n\ndef render_json(tree, indent):\n \"\"\"Converts the tree into a flat json representation.\n\n The json repr will be a list of hashes, each hash having 2 fields:\n - package\n - dependencies: list of dependencies\n\n :param dict tree: dependency tree\n :param int indent: no. of spaces to indent json\n :returns: json representation of the tree\n :rtype: str\n\n \"\"\"\n return json.dumps([{'package': k.as_dict(),\n 'dependencies': [v.as_dict() for v in vs]}\n for k, vs in tree.items()],\n indent=indent)\n\n\ndef render_json_tree(tree, indent):\n \"\"\"Converts the tree into a nested json representation.\n\n The json repr will be a list of hashes, each hash having the following fields:\n - package_name\n - key\n - required_version\n - installed_version\n - dependencies: list of dependencies\n\n :param dict tree: dependency tree\n :param int indent: no. 
of spaces to indent json\n :returns: json representation of the tree\n :rtype: str\n\n \"\"\"\n tree = sorted_tree(tree)\n branch_keys = set(r.key for r in flatten(tree.values()))\n nodes = [p for p in tree.keys() if p.key not in branch_keys]\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n\n def aux(node, parent=None, chain=None):\n if chain is None:\n chain = [node.project_name]\n\n d = node.as_dict()\n if parent:\n d['required_version'] = node.version_spec if node.version_spec else 'Any'\n else:\n d['required_version'] = d['installed_version']\n\n d['dependencies'] = [\n aux(c, parent=node, chain=chain+[c.project_name])\n for c in get_children(node)\n if c.project_name not in chain\n ]\n\n return d\n\n return json.dumps([aux(p) for p in nodes], indent=indent)\n\n\ndef dump_graphviz(tree, output_format='dot'):\n \"\"\"Output dependency graph as one of the supported GraphViz output formats.\n\n :param dict tree: dependency graph\n :param string output_format: output format\n :returns: representation of tree in the specified output format\n :rtype: str or binary representation depending on the output format\n\n \"\"\"\n try:\n from graphviz import backend, Digraph\n except ImportError:\n print('graphviz is not available, but necessary for the output '\n 'option. Please install it.', file=sys.stderr)\n sys.exit(1)\n\n if output_format not in backend.FORMATS:\n print('{0} is not a supported output format.'.format(output_format),\n file=sys.stderr)\n print('Supported formats are: {0}'.format(\n ', '.join(sorted(backend.FORMATS))), file=sys.stderr)\n sys.exit(1)\n\n graph = Digraph(format=output_format)\n for package, deps in tree.items():\n project_name = package.project_name\n label = '{0}\\n{1}'.format(project_name, package.version)\n graph.node(project_name, label=label)\n for dep in deps:\n label = dep.version_spec\n if not label:\n label = 'any'\n graph.edge(project_name, dep.project_name, label=label)\n\n # Allow output of dot format, even if GraphViz isn't installed.\n if output_format == 'dot':\n return graph.source\n\n # As it's unknown if the selected output format is binary or not, try to\n # decode it as UTF8 and only print it out in binary if that's not possible.\n try:\n return graph.pipe().decode('utf-8')\n except UnicodeDecodeError:\n return graph.pipe()\n\n\ndef print_graphviz(dump_output):\n \"\"\"Dump the data generated by GraphViz to stdout.\n\n :param dump_output: The output from dump_graphviz\n \"\"\"\n if hasattr(dump_output, 'encode'):\n print(dump_output)\n else:\n with os.fdopen(sys.stdout.fileno(), 'wb') as bytestream:\n bytestream.write(dump_output)\n\n\ndef conflicting_deps(tree):\n \"\"\"Returns dependencies which are not present or conflict with the\n requirements of other packages.\n\n e.g. 
will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed\n\n :param tree: the requirements tree (dict)\n :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage\n :rtype: dict\n\n \"\"\"\n conflicting = defaultdict(list)\n for p, rs in tree.items():\n for req in rs:\n if req.is_conflicting():\n conflicting[p].append(req)\n return conflicting\n\n\ndef cyclic_deps(tree):\n \"\"\"Return cyclic dependencies as list of tuples\n\n :param list pkgs: pkg_resources.Distribution instances\n :param dict pkg_index: mapping of pkgs with their respective keys\n :returns: list of tuples representing cyclic dependencies\n :rtype: generator\n\n \"\"\"\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n cyclic = []\n for p, rs in tree.items():\n for req in rs:\n if p.key in map(attrgetter('key'), get_children(req)):\n cyclic.append((p, req, p))\n return cyclic\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=(\n 'Dependency tree of the installed python packages'\n ))\n parser.add_argument('-v', '--version', action='version',\n version='{0}'.format(__version__))\n parser.add_argument('-f', '--freeze', action='store_true',\n help='Print names so as to write freeze files')\n parser.add_argument('-a', '--all', action='store_true',\n help='list all deps at top level')\n parser.add_argument('-l', '--local-only',\n action='store_true', help=(\n 'If in a virtualenv that has global access '\n 'do not show globally installed packages'\n ))\n parser.add_argument('-u', '--user-only', action='store_true',\n help=(\n 'Only show installations in the user site dir'\n ))\n parser.add_argument('-w', '--warn', action='store', dest='warn',\n nargs='?', default='suppress',\n choices=('silence', 'suppress', 'fail'),\n help=(\n 'Warning control. \"suppress\" will show warnings '\n 'but return 0 whether or not they are present. '\n '\"silence\" will not show warnings at all and '\n 'always return 0. \"fail\" will show warnings and '\n 'return 1 if any are present. The default is '\n '\"suppress\".'\n ))\n parser.add_argument('-r', '--reverse', action='store_true',\n default=False, help=(\n 'Shows the dependency tree in the reverse fashion '\n 'ie. the sub-dependencies are listed with the '\n 'list of packages that need them under them.'\n ))\n parser.add_argument('-p', '--packages',\n help=(\n 'Comma separated list of select packages to show '\n 'in the output. If set, --all will be ignored.'\n ))\n parser.add_argument('-e', '--exclude',\n help=(\n 'Comma separated list of select packages to exclude '\n 'from the output. If set, --all will be ignored.'\n ), metavar='PACKAGES')\n parser.add_argument('-j', '--json', action='store_true', default=False,\n help=(\n 'Display dependency tree as json. This will yield '\n '\"raw\" output that may be used by external tools. '\n 'This option overrides all other options.'\n ))\n parser.add_argument('--json-tree', action='store_true', default=False,\n help=(\n 'Display dependency tree as json which is nested '\n 'the same way as the plain text output printed by default. '\n 'This option overrides all other options (except --json).'\n ))\n parser.add_argument('--graph-output', dest='output_format',\n help=(\n 'Print a dependency graph in the specified output '\n 'format. 
Available are all formats supported by '\n 'GraphViz, e.g.: dot, jpeg, pdf, png, svg'\n ))\n return parser\n\n\ndef _get_args():\n parser = get_parser()\n return parser.parse_args()\n\n\ndef main():\n args = _get_args()\n pkgs = get_installed_distributions(local_only=args.local_only,\n user_only=args.user_only)\n\n dist_index = build_dist_index(pkgs)\n tree = construct_tree(dist_index)\n\n if args.json:\n print(render_json(tree, indent=4))\n return 0\n elif args.json_tree:\n print(render_json_tree(tree, indent=4))\n return 0\n elif args.output_format:\n output = dump_graphviz(tree, output_format=args.output_format)\n print_graphviz(output)\n return 0\n\n return_code = 0\n\n # show warnings about possibly conflicting deps if found and\n # warnings are enabled\n if args.warn != 'silence':\n conflicting = conflicting_deps(tree)\n if conflicting:\n print('Warning!!! Possibly conflicting dependencies found:',\n file=sys.stderr)\n for p, reqs in conflicting.items():\n pkg = p.render_as_root(False)\n print('* {}'.format(pkg), file=sys.stderr)\n for req in reqs:\n req_str = req.render_as_branch(False)\n print(' - {}'.format(req_str), file=sys.stderr)\n print('-'*72, file=sys.stderr)\n\n cyclic = cyclic_deps(tree)\n if cyclic:\n print('Warning!! Cyclic dependencies found:', file=sys.stderr)\n for a, b, c in cyclic:\n print('* {0} => {1} => {2}'.format(a.project_name,\n b.project_name,\n c.project_name),\n file=sys.stderr)\n print('-'*72, file=sys.stderr)\n\n if args.warn == 'fail' and (conflicting or cyclic):\n return_code = 1\n\n show_only = set(args.packages.split(',')) if args.packages else None\n exclude = set(args.exclude.split(',')) if args.exclude else None\n\n if show_only and exclude and (show_only & exclude):\n print('Conflicting packages found in --packages and --exclude lists.', file=sys.stderr)\n sys.exit(1)\n\n tree = render_tree(tree if not args.reverse else reverse_tree(tree),\n list_all=args.all, show_only=show_only,\n frozen=args.freeze, exclude=exclude)\n print(tree)\n return return_code\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"path": "pipenv/vendor/pipdeptree.py"
}
] | [
{
"content": "from __future__ import print_function\nimport os\nimport sys\nfrom itertools import chain\nfrom collections import defaultdict\nimport argparse\nfrom operator import attrgetter\nimport json\nfrom importlib import import_module\n\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\npardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(pardir)\nfrom pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement\n\nimport pkg_resources\n# inline:\n# from graphviz import backend, Digraph\n\n\n__version__ = '0.13.0'\n\n\nflatten = chain.from_iterable\n\n\ndef build_dist_index(pkgs):\n \"\"\"Build an index pkgs by their key as a dict.\n\n :param list pkgs: list of pkg_resources.Distribution instances\n :returns: index of the pkgs by the pkg key\n :rtype: dict\n\n \"\"\"\n return dict((p.key, DistPackage(p)) for p in pkgs)\n\n\ndef construct_tree(index):\n \"\"\"Construct tree representation of the pkgs from the index.\n\n The keys of the dict representing the tree will be objects of type\n DistPackage and the values will be list of ReqPackage objects.\n\n :param dict index: dist index ie. index of pkgs by their keys\n :returns: tree of pkgs and their dependencies\n :rtype: dict\n\n \"\"\"\n return dict((p, [ReqPackage(r, index.get(r.key))\n for r in p.requires()])\n for p in index.values())\n\n\ndef sorted_tree(tree):\n \"\"\"Sorts the dict representation of the tree\n\n The root packages as well as the intermediate packages are sorted\n in the alphabetical order of the package names.\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :returns: sorted tree\n :rtype: collections.OrderedDict\n\n \"\"\"\n return OrderedDict(sorted([(k, sorted(v, key=attrgetter('key')))\n for k, v in tree.items()],\n key=lambda kv: kv[0].key))\n\n\ndef find_tree_root(tree, key):\n \"\"\"Find a root in a tree by it's key\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :param str key: key of the root node to find\n :returns: a root node if found else None\n :rtype: mixed\n\n \"\"\"\n result = [p for p in tree.keys() if p.key == key]\n assert len(result) in [0, 1]\n return None if len(result) == 0 else result[0]\n\n\ndef reverse_tree(tree):\n \"\"\"Reverse the dependency tree.\n\n ie. 
the keys of the resulting dict are objects of type\n ReqPackage and the values are lists of DistPackage objects.\n\n :param dict tree: the pkg dependency tree obtained by calling\n `construct_tree` function\n :returns: reversed tree\n :rtype: dict\n\n \"\"\"\n rtree = defaultdict(list)\n child_keys = set(c.key for c in flatten(tree.values()))\n for k, vs in tree.items():\n for v in vs:\n node = find_tree_root(rtree, v.key) or v\n rtree[node].append(k.as_required_by(v))\n if k.key not in child_keys:\n rtree[k.as_requirement()] = []\n return rtree\n\n\ndef guess_version(pkg_key, default='?'):\n \"\"\"Guess the version of a pkg when pip doesn't provide it\n\n :param str pkg_key: key of the package\n :param str default: default version to return if unable to find\n :returns: version\n :rtype: string\n\n \"\"\"\n try:\n m = import_module(pkg_key)\n except ImportError:\n return default\n else:\n return getattr(m, '__version__', default)\n\n\nclass Package(object):\n \"\"\"Abstract class for wrappers around objects that pip returns.\n\n This class needs to be subclassed with implementations for\n `render_as_root` and `render_as_branch` methods.\n\n \"\"\"\n\n def __init__(self, obj):\n self._obj = obj\n self.project_name = obj.project_name\n self.key = obj.key\n\n def render_as_root(self, frozen):\n return NotImplementedError\n\n def render_as_branch(self, frozen):\n return NotImplementedError\n\n def render(self, parent=None, frozen=False):\n if not parent:\n return self.render_as_root(frozen)\n else:\n return self.render_as_branch(frozen)\n\n @staticmethod\n def frozen_repr(obj):\n fr = FrozenRequirement.from_dist(obj, [])\n return str(fr).strip()\n\n def __getattr__(self, key):\n return getattr(self._obj, key)\n\n def __repr__(self):\n return '<{0}(\"{1}\")>'.format(self.__class__.__name__, self.key)\n\n\nclass DistPackage(Package):\n \"\"\"Wrapper class for pkg_resources.Distribution instances\n\n :param obj: pkg_resources.Distribution to wrap over\n :param req: optional ReqPackage object to associate this\n DistPackage with. 
This is useful for displaying the\n tree in reverse\n \"\"\"\n\n def __init__(self, obj, req=None):\n super(DistPackage, self).__init__(obj)\n self.version_spec = None\n self.req = req\n\n def render_as_root(self, frozen):\n if not frozen:\n return '{0}=={1}'.format(self.project_name, self.version)\n else:\n return self.__class__.frozen_repr(self._obj)\n\n def render_as_branch(self, frozen):\n assert self.req is not None\n if not frozen:\n parent_ver_spec = self.req.version_spec\n parent_str = self.req.project_name\n if parent_ver_spec:\n parent_str += parent_ver_spec\n return (\n '{0}=={1} [requires: {2}]'\n ).format(self.project_name, self.version, parent_str)\n else:\n return self.render_as_root(frozen)\n\n def as_requirement(self):\n \"\"\"Return a ReqPackage representation of this DistPackage\"\"\"\n return ReqPackage(self._obj.as_requirement(), dist=self)\n\n def as_required_by(self, req):\n \"\"\"Return a DistPackage instance associated to a requirement\n\n This association is necessary for displaying the tree in\n reverse.\n\n :param ReqPackage req: the requirement to associate with\n :returns: DistPackage instance\n\n \"\"\"\n return self.__class__(self._obj, req)\n\n def as_dict(self):\n return {'key': self.key,\n 'package_name': self.project_name,\n 'installed_version': self.version}\n\n\nclass ReqPackage(Package):\n \"\"\"Wrapper class for Requirements instance\n\n :param obj: The `Requirements` instance to wrap over\n :param dist: optional `pkg_resources.Distribution` instance for\n this requirement\n \"\"\"\n\n UNKNOWN_VERSION = '?'\n\n def __init__(self, obj, dist=None):\n super(ReqPackage, self).__init__(obj)\n self.dist = dist\n\n @property\n def version_spec(self):\n specs = sorted(self._obj.specs, reverse=True) # `reverse` makes '>' prior to '<'\n return ','.join([''.join(sp) for sp in specs]) if specs else None\n\n @property\n def installed_version(self):\n if not self.dist:\n return guess_version(self.key, self.UNKNOWN_VERSION)\n return self.dist.version\n\n def is_conflicting(self):\n \"\"\"If installed version conflicts with required version\"\"\"\n # unknown installed version is also considered conflicting\n if self.installed_version == self.UNKNOWN_VERSION:\n return True\n ver_spec = (self.version_spec if self.version_spec else '')\n req_version_str = '{0}{1}'.format(self.project_name, ver_spec)\n req_obj = pkg_resources.Requirement.parse(req_version_str)\n return self.installed_version not in req_obj\n\n def render_as_root(self, frozen):\n if not frozen:\n return '{0}=={1}'.format(self.project_name, self.installed_version)\n elif self.dist:\n return self.__class__.frozen_repr(self.dist._obj)\n else:\n return self.project_name\n\n def render_as_branch(self, frozen):\n if not frozen:\n req_ver = self.version_spec if self.version_spec else 'Any'\n return (\n '{0} [required: {1}, installed: {2}]'\n ).format(self.project_name, req_ver, self.installed_version)\n else:\n return self.render_as_root(frozen)\n\n def as_dict(self):\n return {'key': self.key,\n 'package_name': self.project_name,\n 'installed_version': self.installed_version,\n 'required_version': self.version_spec}\n\n\ndef render_tree(tree, list_all=True, show_only=None, frozen=False, exclude=None):\n \"\"\"Convert tree to string representation\n\n :param dict tree: the package tree\n :param bool list_all: whether to list all the pgks at the root\n level or only those that are the\n sub-dependencies\n :param set show_only: set of select packages to be shown in the\n output. 
This is optional arg, default: None.\n :param bool frozen: whether or not show the names of the pkgs in\n the output that's favourable to pip --freeze\n :param set exclude: set of select packages to be excluded from the\n output. This is optional arg, default: None.\n :returns: string representation of the tree\n :rtype: str\n\n \"\"\"\n tree = sorted_tree(tree)\n branch_keys = set(r.key for r in flatten(tree.values()))\n nodes = tree.keys()\n use_bullets = not frozen\n\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n\n if show_only:\n nodes = [p for p in nodes\n if p.key in show_only or p.project_name in show_only]\n elif not list_all:\n nodes = [p for p in nodes if p.key not in branch_keys]\n\n def aux(node, parent=None, indent=0, chain=None):\n if exclude and (node.key in exclude or node.project_name in exclude):\n return []\n if chain is None:\n chain = [node.project_name]\n node_str = node.render(parent, frozen)\n if parent:\n prefix = ' '*indent + ('- ' if use_bullets else '')\n node_str = prefix + node_str\n result = [node_str]\n children = [aux(c, node, indent=indent+2,\n chain=chain+[c.project_name])\n for c in get_children(node)\n if c.project_name not in chain]\n result += list(flatten(children))\n return result\n\n lines = flatten([aux(p) for p in nodes])\n return '\\n'.join(lines)\n\n\ndef render_json(tree, indent):\n \"\"\"Converts the tree into a flat json representation.\n\n The json repr will be a list of hashes, each hash having 2 fields:\n - package\n - dependencies: list of dependencies\n\n :param dict tree: dependency tree\n :param int indent: no. of spaces to indent json\n :returns: json representation of the tree\n :rtype: str\n\n \"\"\"\n return json.dumps([{'package': k.as_dict(),\n 'dependencies': [v.as_dict() for v in vs]}\n for k, vs in tree.items()],\n indent=indent)\n\n\ndef render_json_tree(tree, indent):\n \"\"\"Converts the tree into a nested json representation.\n\n The json repr will be a list of hashes, each hash having the following fields:\n - package_name\n - key\n - required_version\n - installed_version\n - dependencies: list of dependencies\n\n :param dict tree: dependency tree\n :param int indent: no. 
of spaces to indent json\n :returns: json representation of the tree\n :rtype: str\n\n \"\"\"\n tree = sorted_tree(tree)\n branch_keys = set(r.key for r in flatten(tree.values()))\n nodes = [p for p in tree.keys() if p.key not in branch_keys]\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n\n def aux(node, parent=None, chain=None):\n if chain is None:\n chain = [node.project_name]\n\n d = node.as_dict()\n if parent:\n d['required_version'] = node.version_spec if node.version_spec else 'Any'\n else:\n d['required_version'] = d['installed_version']\n\n d['dependencies'] = [\n aux(c, parent=node, chain=chain+[c.project_name])\n for c in get_children(node)\n if c.project_name not in chain\n ]\n\n return d\n\n return json.dumps([aux(p) for p in nodes], indent=indent)\n\n\ndef dump_graphviz(tree, output_format='dot'):\n \"\"\"Output dependency graph as one of the supported GraphViz output formats.\n\n :param dict tree: dependency graph\n :param string output_format: output format\n :returns: representation of tree in the specified output format\n :rtype: str or binary representation depending on the output format\n\n \"\"\"\n try:\n from graphviz import backend, Digraph\n except ImportError:\n print('graphviz is not available, but necessary for the output '\n 'option. Please install it.', file=sys.stderr)\n sys.exit(1)\n\n if output_format not in backend.FORMATS:\n print('{0} is not a supported output format.'.format(output_format),\n file=sys.stderr)\n print('Supported formats are: {0}'.format(\n ', '.join(sorted(backend.FORMATS))), file=sys.stderr)\n sys.exit(1)\n\n graph = Digraph(format=output_format)\n for package, deps in tree.items():\n project_name = package.project_name\n label = '{0}\\n{1}'.format(project_name, package.version)\n graph.node(project_name, label=label)\n for dep in deps:\n label = dep.version_spec\n if not label:\n label = 'any'\n graph.edge(project_name, dep.project_name, label=label)\n\n # Allow output of dot format, even if GraphViz isn't installed.\n if output_format == 'dot':\n return graph.source\n\n # As it's unknown if the selected output format is binary or not, try to\n # decode it as UTF8 and only print it out in binary if that's not possible.\n try:\n return graph.pipe().decode('utf-8')\n except UnicodeDecodeError:\n return graph.pipe()\n\n\ndef print_graphviz(dump_output):\n \"\"\"Dump the data generated by GraphViz to stdout.\n\n :param dump_output: The output from dump_graphviz\n \"\"\"\n if hasattr(dump_output, 'encode'):\n print(dump_output)\n else:\n with os.fdopen(sys.stdout.fileno(), 'wb') as bytestream:\n bytestream.write(dump_output)\n\n\ndef conflicting_deps(tree):\n \"\"\"Returns dependencies which are not present or conflict with the\n requirements of other packages.\n\n e.g. 
will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed\n\n :param tree: the requirements tree (dict)\n :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage\n :rtype: dict\n\n \"\"\"\n conflicting = defaultdict(list)\n for p, rs in tree.items():\n for req in rs:\n if req.is_conflicting():\n conflicting[p].append(req)\n return conflicting\n\n\ndef cyclic_deps(tree):\n \"\"\"Return cyclic dependencies as list of tuples\n\n :param list pkgs: pkg_resources.Distribution instances\n :param dict pkg_index: mapping of pkgs with their respective keys\n :returns: list of tuples representing cyclic dependencies\n :rtype: generator\n\n \"\"\"\n key_tree = dict((k.key, v) for k, v in tree.items())\n get_children = lambda n: key_tree.get(n.key, [])\n cyclic = []\n for p, rs in tree.items():\n for req in rs:\n if p.key in map(attrgetter('key'), get_children(req)):\n cyclic.append((p, req, p))\n return cyclic\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=(\n 'Dependency tree of the installed python packages'\n ))\n parser.add_argument('-v', '--version', action='version',\n version='{0}'.format(__version__))\n parser.add_argument('-f', '--freeze', action='store_true',\n help='Print names so as to write freeze files')\n parser.add_argument('-a', '--all', action='store_true',\n help='list all deps at top level')\n parser.add_argument('-l', '--local-only',\n action='store_true', help=(\n 'If in a virtualenv that has global access '\n 'do not show globally installed packages'\n ))\n parser.add_argument('-u', '--user-only', action='store_true',\n help=(\n 'Only show installations in the user site dir'\n ))\n parser.add_argument('-w', '--warn', action='store', dest='warn',\n nargs='?', default='suppress',\n choices=('silence', 'suppress', 'fail'),\n help=(\n 'Warning control. \"suppress\" will show warnings '\n 'but return 0 whether or not they are present. '\n '\"silence\" will not show warnings at all and '\n 'always return 0. \"fail\" will show warnings and '\n 'return 1 if any are present. The default is '\n '\"suppress\".'\n ))\n parser.add_argument('-r', '--reverse', action='store_true',\n default=False, help=(\n 'Shows the dependency tree in the reverse fashion '\n 'ie. the sub-dependencies are listed with the '\n 'list of packages that need them under them.'\n ))\n parser.add_argument('-p', '--packages',\n help=(\n 'Comma separated list of select packages to show '\n 'in the output. If set, --all will be ignored.'\n ))\n parser.add_argument('-e', '--exclude',\n help=(\n 'Comma separated list of select packages to exclude '\n 'from the output. If set, --all will be ignored.'\n ), metavar='PACKAGES')\n parser.add_argument('-j', '--json', action='store_true', default=False,\n help=(\n 'Display dependency tree as json. This will yield '\n '\"raw\" output that may be used by external tools. '\n 'This option overrides all other options.'\n ))\n parser.add_argument('--json-tree', action='store_true', default=False,\n help=(\n 'Display dependency tree as json which is nested '\n 'the same way as the plain text output printed by default. '\n 'This option overrides all other options (except --json).'\n ))\n parser.add_argument('--graph-output', dest='output_format',\n help=(\n 'Print a dependency graph in the specified output '\n 'format. 
Available are all formats supported by '\n 'GraphViz, e.g.: dot, jpeg, pdf, png, svg'\n ))\n return parser\n\n\ndef _get_args():\n parser = get_parser()\n return parser.parse_args()\n\n\ndef main():\n args = _get_args()\n pkgs = get_installed_distributions(local_only=args.local_only,\n user_only=args.user_only)\n\n dist_index = build_dist_index(pkgs)\n tree = construct_tree(dist_index)\n\n if args.json:\n print(render_json(tree, indent=4))\n return 0\n elif args.json_tree:\n print(render_json_tree(tree, indent=4))\n return 0\n elif args.output_format:\n output = dump_graphviz(tree, output_format=args.output_format)\n print_graphviz(output)\n return 0\n\n return_code = 0\n\n # show warnings about possibly conflicting deps if found and\n # warnings are enabled\n if args.warn != 'silence':\n conflicting = conflicting_deps(tree)\n if conflicting:\n print('Warning!!! Possibly conflicting dependencies found:',\n file=sys.stderr)\n for p, reqs in conflicting.items():\n pkg = p.render_as_root(False)\n print('* {}'.format(pkg), file=sys.stderr)\n for req in reqs:\n req_str = req.render_as_branch(False)\n print(' - {}'.format(req_str), file=sys.stderr)\n print('-'*72, file=sys.stderr)\n\n cyclic = cyclic_deps(tree)\n if cyclic:\n print('Warning!! Cyclic dependencies found:', file=sys.stderr)\n for a, b, c in cyclic:\n print('* {0} => {1} => {2}'.format(a.project_name,\n b.project_name,\n c.project_name),\n file=sys.stderr)\n print('-'*72, file=sys.stderr)\n\n if args.warn == 'fail' and (conflicting or cyclic):\n return_code = 1\n\n show_only = set(args.packages.split(',')) if args.packages else None\n exclude = set(args.exclude.split(',')) if args.exclude else None\n\n if show_only and exclude and (show_only & exclude):\n print('Conflicting packages found in --packages and --exclude lists.', file=sys.stderr)\n sys.exit(1)\n\n tree = render_tree(tree if not args.reverse else reverse_tree(tree),\n list_all=args.all, show_only=show_only,\n frozen=args.freeze, exclude=exclude)\n print(tree)\n return return_code\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"path": "pipenv/vendor/pipdeptree.py"
}
] | diff --git a/news/2952.bugfix b/news/2952.bugfix
new file mode 100644
index 0000000000..df640991bc
--- /dev/null
+++ b/news/2952.bugfix
@@ -0,0 +1 @@
+Fixed a bug with importing local vendored dependencies when running ``pipenv graph``.
diff --git a/pipenv/vendor/pipdeptree.py b/pipenv/vendor/pipdeptree.py
index 9cce0325e7..2082fc8a36 100644
--- a/pipenv/vendor/pipdeptree.py
+++ b/pipenv/vendor/pipdeptree.py
@@ -13,6 +13,8 @@
except ImportError:
from ordereddict import OrderedDict
+pardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(pardir)
from pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement
import pkg_resources
diff --git a/tasks/vendoring/patches/vendor/pipdeptree-updated-pip18.patch b/tasks/vendoring/patches/vendor/pipdeptree-updated-pip18.patch
index e3ff9bbf29..d479ebfa39 100644
--- a/tasks/vendoring/patches/vendor/pipdeptree-updated-pip18.patch
+++ b/tasks/vendoring/patches/vendor/pipdeptree-updated-pip18.patch
@@ -1,8 +1,8 @@
diff --git a/pipenv/vendor/pipdeptree.py b/pipenv/vendor/pipdeptree.py
-index 7820aa5..9cce032 100644
+index 7820aa5..2082fc8 100644
--- a/pipenv/vendor/pipdeptree.py
+++ b/pipenv/vendor/pipdeptree.py
-@@ -13,11 +13,7 @@ try:
+@@ -13,11 +13,9 @@ try:
except ImportError:
from ordereddict import OrderedDict
@@ -11,6 +11,8 @@ index 7820aa5..9cce032 100644
- from pipenv.patched.notpip._internal.operations.freeze import FrozenRequirement
-except ImportError:
- from pipenv.patched.notpip import get_installed_distributions, FrozenRequirement
++pardir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
++sys.path.append(pardir)
+from pipenv.vendor.pip_shims import get_installed_distributions, FrozenRequirement
import pkg_resources
|
DataBiosphere__toil-1406 | Scaler thread shutdown error
File "/home/rnaenv/bin/toil-rnaseq", line 11, in <module>
sys.exit(main())
File "/home/rnaenv/local/lib/python2.7/site-packages/toil_rnaseq/rnaseq_cgl_pipeline.py", line 573, in main
Job.Runner.startToil(Job.wrapJobFn(map_job, download_sample, samples, config), args)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 738, in startToil
return toil.start(job)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 655, in start
return self._runMainLoop(rootJobGraph)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 963, in _runMainLoop
jobCache=self._jobCache).run()
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 170, in run
self.clusterScaler.shutdown()
File "/usr/local/lib/python2.7/dist-packages/toil/provisioners/clusterScaler.py", line 262, in shutdown
self.scaler.join()
AttributeError: 'NoneType' object has no attribute 'join'
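
The last two frames point at `ClusterScaler.shutdown()`, which always calls `self.scaler.join()` even though `self.scaler` is `None` whenever only the preemptable scaler thread was created (`--maxNodes` set to 0 while `--maxPreemptableNodes` is non-zero). A minimal sketch of the corrected method, joining the thread bound by the loop instead, matching the one-line change in the diff below:

```python
def shutdown(self):
    """
    Shutdown the cluster.
    """
    self.stop = True
    for scaler in self.preemptableScaler, self.scaler:
        if scaler is not None:
            # Join whichever scaler thread actually exists; self.scaler may be None.
            scaler.join()
```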
| [
{
"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport logging\nfrom collections import deque\nfrom threading import Lock\n\nfrom bd2k.util.exceptions import require\nfrom bd2k.util.threading import ExceptionalThread\nfrom bd2k.util.throttle import throttle\n\nfrom toil.batchSystems.abstractBatchSystem import AbstractScalableBatchSystem\nfrom toil.common import Config\nfrom toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape\n\nlogger = logging.getLogger(__name__)\n\n# A *deficit* exists when we have more jobs that can run on preemptable nodes than we have\n# preemptable nodes. In order to not block these jobs, we want to increase the number of non-\n# preemptable nodes that we have and need for just non-preemptable jobs. However, we may still\n# prefer waiting for preemptable instances to come available.\n#\n# To accommodate this, we set the delta to the difference between the number of provisioned\n# preemptable nodes and the number of nodes that were requested. when the non-preemptable thread\n# wants to provision nodes, it will multiply this delta times a preference for preemptable vs.\n# non-preemptable nodes.\n\n_preemptableNodeDeficit = 0\n\nclass RecentJobShapes(object):\n \"\"\"\n Used to track the 'shapes' of the last N jobs run (see Shape).\n \"\"\"\n\n def __init__(self, config, nodeShape, N=1000):\n # As a prior we start of with 10 jobs each with the default memory, cores, and disk. 
To\n # estimate the running time we use the the default wall time of each node allocation,\n # so that one job will fill the time per node.\n self.jobShapes = deque(maxlen=N,\n iterable=10 * [Shape(wallTime=nodeShape.wallTime,\n memory=config.defaultMemory,\n cores=config.defaultCores,\n disk=config.defaultDisk)])\n # Calls to add and getLastNJobShapes may be concurrent\n self.lock = Lock()\n # Number of jobs to average over\n self.N = N\n\n def add(self, jobShape):\n \"\"\"\n Adds a job shape as the last completed job.\n :param Shape jobShape: The memory, core and disk requirements of the completed job\n \"\"\"\n with self.lock:\n self.jobShapes.append(jobShape)\n\n def get(self):\n \"\"\"\n Gets the last N job shapes added.\n \"\"\"\n with self.lock:\n return list(self.jobShapes)\n\n\ndef binPacking(jobShapes, nodeShape):\n \"\"\"\n Use a first fit decreasing (FFD) bin packing like algorithm to calculate an approximate\n minimum number of nodes that will fit the given list of jobs.\n :param Shape nodeShape: The properties of an atomic node allocation, in terms of wall-time,\n memory, cores and local disk.\n :param list[Shape] jobShapes: A list of shapes, each representing a job.\n Let a *node reservation* be an interval of time that a node is reserved for, it is defined by\n an integer number of node-allocations.\n For a node reservation its *jobs* are the set of jobs that will be run within the node\n reservation.\n A minimal node reservation has time equal to one atomic node allocation, or the minimum\n number node allocations to run the longest running job in its jobs.\n :rtype: int\n :returns: The minimum number of minimal node allocations estimated to be required to run all\n the jobs in jobShapes.\n \"\"\"\n logger.debug('Running bin packing for node shape %s and %s job(s).', nodeShape, len(jobShapes))\n # Sort in descending order from largest to smallest. The FFD like-strategy will pack the jobs in order from longest\n # to shortest.\n jobShapes.sort()\n jobShapes.reverse()\n assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]\n\n class NodeReservation(object):\n \"\"\"\n Represents a node reservation. 
To represent the resources available in a reservation a\n node reservation is represented as a sequence of Shapes, each giving the resources free\n within the given interval of time\n \"\"\"\n\n def __init__(self, shape):\n # The wall-time and resource available\n self.shape = shape\n # The next portion of the reservation\n self.nReservation = None\n\n nodeReservations = [] # The list of node reservations\n\n for jS in jobShapes:\n def addToReservation():\n \"\"\"\n Function adds the job, jS, to the first node reservation in which it will fit (this\n is the bin-packing aspect)\n \"\"\"\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1\n\n addToReservation()\n logger.debug(\"Done running bin packing for node shape %s and %s job(s) resulting in %s node \"\n \"reservations.\", nodeShape, len(jobShapes), len(nodeReservations))\n return len(nodeReservations)\n\n\nclass ClusterScaler(object):\n def __init__(self, provisioner, leader, config):\n \"\"\"\n Class manages automatically scaling the number of worker nodes.\n :param AbstractProvisioner provisioner: Provisioner instance to scale.\n :param toil.leader.Leader leader: \n :param Config config: Config object from which to draw parameters.\n \"\"\"\n self.provisioner = provisioner\n self.leader = leader\n self.config = config\n # Indicates that the scaling threads should shutdown\n self.stop = False\n\n assert config.maxPreemptableNodes >= 0 and config.maxNodes >= 0\n require(config.maxPreemptableNodes + config.maxNodes > 
0,\n 'Either --maxNodes or --maxPreemptableNodes must be non-zero.')\n \n self.preemptableScaler = ScalerThread(self, preemptable=True) if self.config.maxPreemptableNodes > 0 else None\n\n self.scaler = ScalerThread(self, preemptable=False) if self.config.maxNodes > 0 else None\n\n def start(self):\n \"\"\" \n Start the cluster scaler thread(s).\n \"\"\"\n if self.preemptableScaler != None:\n self.preemptableScaler.start()\n\n if self.scaler != None:\n self.scaler.start()\n\n def check(self):\n \"\"\"\n Attempt to join any existing scaler threads that may have died or finished. This insures\n any exceptions raised in the threads are propagated in a timely fashion.\n \"\"\"\n exception = False\n for scalerThread in [self.preemptableScaler, self.scaler]:\n if scalerThread is not None:\n try:\n scalerThread.join(timeout=0)\n except Exception as e:\n logger.exception(e)\n exception = True\n if exception:\n raise RuntimeError('The cluster scaler has exited due to an exception')\n\n def shutdown(self):\n \"\"\"\n Shutdown the cluster.\n \"\"\"\n self.stop = True\n for scaler in self.preemptableScaler, self.scaler:\n if scaler is not None:\n self.scaler.join()\n\n def addCompletedJob(self, job, wallTime):\n \"\"\"\n Adds the shape of a completed job to the queue, allowing the scalar to use the last N\n completed jobs in factoring how many nodes are required in the cluster.\n :param toil.job.JobNode job: The memory, core and disk requirements of the completed job\n :param int wallTime: The wall-time taken to complete the job in seconds.\n \"\"\"\n s = Shape(wallTime=wallTime, memory=job.memory, cores=job.cores, disk=job.disk)\n if job.preemptable and self.preemptableScaler is not None:\n self.preemptableScaler.jobShapes.add(s)\n else:\n self.scaler.jobShapes.add(s)\n\n\nclass ScalerThread(ExceptionalThread):\n \"\"\"\n A thread that automatically scales the number of either preemptable or non-preemptable worker\n nodes according to the number of jobs queued and the resource requirements of the last N\n completed jobs.\n The scaling calculation is essentially as follows: Use the RecentJobShapes instance to\n calculate how many nodes, n, can be used to productively compute the last N completed\n jobs. Let M be the number of jobs issued to the batch system. The number of nodes\n required is then estimated to be alpha * n * M/N, where alpha is a scaling factor used to\n adjust the balance between under- and over- provisioning the cluster.\n At each scaling decision point a comparison between the current, C, and newly estimated\n number of nodes is made. If the absolute difference is less than beta * C then no change\n is made, else the size of the cluster is adapted. 
The beta factor is an inertia parameter\n that prevents continual fluctuations in the number of nodes.\n \"\"\"\n def __init__(self, scaler, preemptable):\n \"\"\"\n :param ClusterScaler scaler: the parent class\n \"\"\"\n super(ScalerThread, self).__init__(name='preemptable-scaler' if preemptable else 'scaler')\n self.scaler = scaler\n self.preemptable = preemptable\n self.nodeTypeString = (\"preemptable\" if self.preemptable else \"non-preemptable\") + \" nodes\" # Used for logging\n # Resource requirements and wall-time of an atomic node allocation\n self.nodeShape = scaler.provisioner.getNodeShape(preemptable=preemptable)\n # Monitors the requirements of the N most recently completed jobs\n self.jobShapes = RecentJobShapes(scaler.config, self.nodeShape)\n # Minimum/maximum number of either preemptable or non-preemptable nodes in the cluster\n self.minNodes = scaler.config.minPreemptableNodes if preemptable else scaler.config.minNodes\n self.maxNodes = scaler.config.maxPreemptableNodes if preemptable else scaler.config.maxNodes\n if isinstance(self.scaler.leader.batchSystem, AbstractScalableBatchSystem):\n self.totalNodes = len(self.scaler.leader.batchSystem.getNodes(self.preemptable))\n else:\n self.totalNodes = 0\n logger.info('Starting with %s %s(s) in the cluster.', self.totalNodes, self.nodeTypeString)\n \n if scaler.config.clusterStats:\n self.scaler.provisioner.startStats(preemptable=preemptable)\n\n def tryRun(self):\n global _preemptableNodeDeficit\n\n while not self.scaler.stop:\n with throttle(self.scaler.config.scaleInterval):\n # Estimate the number of nodes to run the issued jobs.\n \n # Number of jobs issued\n queueSize = self.scaler.leader.getNumberOfJobsIssued(preemptable=self.preemptable)\n \n # Job shapes of completed jobs\n recentJobShapes = self.jobShapes.get()\n assert len(recentJobShapes) > 0\n \n # Estimate of number of nodes needed to run recent jobs\n nodesToRunRecentJobs = binPacking(recentJobShapes, self.nodeShape)\n \n # Actual calculation of the estimated number of nodes required\n estimatedNodes = 0 if queueSize == 0 else max(1, int(round(\n self.scaler.config.alphaPacking\n * nodesToRunRecentJobs\n * float(queueSize) / len(recentJobShapes))))\n \n # Account for case where the average historical runtime of completed jobs is less\n # than the runtime of currently running jobs. This is important\n # to avoid a deadlock where the estimated number of nodes to run the jobs\n # is too small to schedule a set service jobs and their dependent jobs, leading\n # to service jobs running indefinitely.\n \n # How many jobs are currently running and their average runtime.\n numberOfRunningJobs, currentAvgRuntime = self.scaler.leader.getNumberAndAvgRuntimeOfCurrentlyRunningJobs()\n \n # Average runtime of recently completed jobs\n historicalAvgRuntime = sum(map(lambda jS : jS.wallTime, recentJobShapes))\n \n # Ratio of avg. runtime of currently running and completed jobs\n runtimeCorrection = float(currentAvgRuntime)/historicalAvgRuntime if currentAvgRuntime > historicalAvgRuntime and numberOfRunningJobs >= estimatedNodes else 1.0\n \n # Make correction, if necessary (only do so if cluster is busy and average runtime is higher than historical\n # average)\n if runtimeCorrection != 1.0:\n logger.warn(\"Historical avg. runtime (%s) is less than current avg. 
runtime (%s) and cluster\"\n \" is being well utilised (%s running jobs), increasing cluster requirement by: %s\" % \n (historicalAvgRuntime, currentAvgRuntime, numberOfRunningJobs, runtimeCorrection))\n estimatedNodes *= runtimeCorrection\n\n # If we're the non-preemptable scaler, we need to see if we have a deficit of\n # preemptable nodes that we should compensate for.\n if not self.preemptable:\n compensation = self.scaler.config.preemptableCompensation\n assert 0.0 <= compensation <= 1.0\n # The number of nodes we provision as compensation for missing preemptable\n # nodes is the product of the deficit (the number of preemptable nodes we did\n # _not_ allocate) and configuration preference.\n compensationNodes = int(round(_preemptableNodeDeficit * compensation))\n logger.info('Adding %d preemptable nodes to compensate for a deficit of %d '\n 'non-preemptable ones.', compensationNodes, _preemptableNodeDeficit)\n estimatedNodes += compensationNodes\n\n fix_my_name = (0 if nodesToRunRecentJobs <= 0\n else len(recentJobShapes) / float(nodesToRunRecentJobs))\n logger.info('Estimating that cluster needs %s %s of shape %s, from current '\n 'size of %s, given a queue size of %s, the number of jobs per node '\n 'estimated to be %s, an alpha parameter of %s and a run-time length correction of %s.',\n estimatedNodes, self.nodeTypeString, self.nodeShape, \n self.totalNodes, queueSize, fix_my_name,\n self.scaler.config.alphaPacking, runtimeCorrection)\n\n # Use inertia parameter to stop small fluctuations\n if estimatedNodes <= self.totalNodes * self.scaler.config.betaInertia <= estimatedNodes:\n logger.debug('Difference in new (%s) and previous estimates in number of '\n '%s (%s) required is within beta (%s), making no change.',\n estimatedNodes, self.nodeTypeString, self.totalNodes, self.scaler.config.betaInertia)\n estimatedNodes = self.totalNodes\n\n # Bound number using the max and min node parameters\n if estimatedNodes > self.maxNodes:\n logger.info('Limiting the estimated number of necessary %s (%s) to the '\n 'configured maximum (%s).', self.nodeTypeString, estimatedNodes, self.maxNodes)\n estimatedNodes = self.maxNodes\n elif estimatedNodes < self.minNodes:\n logger.info('Raising the estimated number of necessary %s (%s) to the '\n 'configured mininimum (%s).', self.nodeTypeString, estimatedNodes, self.minNodes)\n estimatedNodes = self.minNodes\n\n if estimatedNodes != self.totalNodes:\n logger.info('Changing the number of %s from %s to %s.', self.nodeTypeString, self.totalNodes,\n estimatedNodes)\n self.totalNodes = self.scaler.provisioner.setNodeCount(numNodes=estimatedNodes,\n preemptable=self.preemptable)\n \n # If we were scaling up the number of preemptable nodes and failed to meet\n # our target, we need to update the slack so that non-preemptable nodes will\n # be allocated instead and we won't block. If we _did_ meet our target,\n # we need to reset the slack to 0.\n if self.preemptable:\n if self.totalNodes < estimatedNodes:\n deficit = estimatedNodes - self.totalNodes\n logger.info('Preemptable scaler detected deficit of %d nodes.', deficit)\n _preemptableNodeDeficit = deficit\n else:\n _preemptableNodeDeficit = 0\n\n self.scaler.provisioner.checkStats()\n \n self.scaler.provisioner.shutDown(preemptable=self.preemptable)\n logger.info('Scaler exited normally.')\n",
"path": "src/toil/provisioners/clusterScaler.py"
}
] | [
{
"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport logging\nfrom collections import deque\nfrom threading import Lock\n\nfrom bd2k.util.exceptions import require\nfrom bd2k.util.threading import ExceptionalThread\nfrom bd2k.util.throttle import throttle\n\nfrom toil.batchSystems.abstractBatchSystem import AbstractScalableBatchSystem\nfrom toil.common import Config\nfrom toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape\n\nlogger = logging.getLogger(__name__)\n\n# A *deficit* exists when we have more jobs that can run on preemptable nodes than we have\n# preemptable nodes. In order to not block these jobs, we want to increase the number of non-\n# preemptable nodes that we have and need for just non-preemptable jobs. However, we may still\n# prefer waiting for preemptable instances to come available.\n#\n# To accommodate this, we set the delta to the difference between the number of provisioned\n# preemptable nodes and the number of nodes that were requested. when the non-preemptable thread\n# wants to provision nodes, it will multiply this delta times a preference for preemptable vs.\n# non-preemptable nodes.\n\n_preemptableNodeDeficit = 0\n\nclass RecentJobShapes(object):\n \"\"\"\n Used to track the 'shapes' of the last N jobs run (see Shape).\n \"\"\"\n\n def __init__(self, config, nodeShape, N=1000):\n # As a prior we start of with 10 jobs each with the default memory, cores, and disk. 
To\n # estimate the running time we use the the default wall time of each node allocation,\n # so that one job will fill the time per node.\n self.jobShapes = deque(maxlen=N,\n iterable=10 * [Shape(wallTime=nodeShape.wallTime,\n memory=config.defaultMemory,\n cores=config.defaultCores,\n disk=config.defaultDisk)])\n # Calls to add and getLastNJobShapes may be concurrent\n self.lock = Lock()\n # Number of jobs to average over\n self.N = N\n\n def add(self, jobShape):\n \"\"\"\n Adds a job shape as the last completed job.\n :param Shape jobShape: The memory, core and disk requirements of the completed job\n \"\"\"\n with self.lock:\n self.jobShapes.append(jobShape)\n\n def get(self):\n \"\"\"\n Gets the last N job shapes added.\n \"\"\"\n with self.lock:\n return list(self.jobShapes)\n\n\ndef binPacking(jobShapes, nodeShape):\n \"\"\"\n Use a first fit decreasing (FFD) bin packing like algorithm to calculate an approximate\n minimum number of nodes that will fit the given list of jobs.\n :param Shape nodeShape: The properties of an atomic node allocation, in terms of wall-time,\n memory, cores and local disk.\n :param list[Shape] jobShapes: A list of shapes, each representing a job.\n Let a *node reservation* be an interval of time that a node is reserved for, it is defined by\n an integer number of node-allocations.\n For a node reservation its *jobs* are the set of jobs that will be run within the node\n reservation.\n A minimal node reservation has time equal to one atomic node allocation, or the minimum\n number node allocations to run the longest running job in its jobs.\n :rtype: int\n :returns: The minimum number of minimal node allocations estimated to be required to run all\n the jobs in jobShapes.\n \"\"\"\n logger.debug('Running bin packing for node shape %s and %s job(s).', nodeShape, len(jobShapes))\n # Sort in descending order from largest to smallest. The FFD like-strategy will pack the jobs in order from longest\n # to shortest.\n jobShapes.sort()\n jobShapes.reverse()\n assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]\n\n class NodeReservation(object):\n \"\"\"\n Represents a node reservation. 
To represent the resources available in a reservation a\n node reservation is represented as a sequence of Shapes, each giving the resources free\n within the given interval of time\n \"\"\"\n\n def __init__(self, shape):\n # The wall-time and resource available\n self.shape = shape\n # The next portion of the reservation\n self.nReservation = None\n\n nodeReservations = [] # The list of node reservations\n\n for jS in jobShapes:\n def addToReservation():\n \"\"\"\n Function adds the job, jS, to the first node reservation in which it will fit (this\n is the bin-packing aspect)\n \"\"\"\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1\n\n addToReservation()\n logger.debug(\"Done running bin packing for node shape %s and %s job(s) resulting in %s node \"\n \"reservations.\", nodeShape, len(jobShapes), len(nodeReservations))\n return len(nodeReservations)\n\n\nclass ClusterScaler(object):\n def __init__(self, provisioner, leader, config):\n \"\"\"\n Class manages automatically scaling the number of worker nodes.\n :param AbstractProvisioner provisioner: Provisioner instance to scale.\n :param toil.leader.Leader leader: \n :param Config config: Config object from which to draw parameters.\n \"\"\"\n self.provisioner = provisioner\n self.leader = leader\n self.config = config\n # Indicates that the scaling threads should shutdown\n self.stop = False\n\n assert config.maxPreemptableNodes >= 0 and config.maxNodes >= 0\n require(config.maxPreemptableNodes + config.maxNodes > 
0,\n 'Either --maxNodes or --maxPreemptableNodes must be non-zero.')\n \n self.preemptableScaler = ScalerThread(self, preemptable=True) if self.config.maxPreemptableNodes > 0 else None\n\n self.scaler = ScalerThread(self, preemptable=False) if self.config.maxNodes > 0 else None\n\n def start(self):\n \"\"\" \n Start the cluster scaler thread(s).\n \"\"\"\n if self.preemptableScaler != None:\n self.preemptableScaler.start()\n\n if self.scaler != None:\n self.scaler.start()\n\n def check(self):\n \"\"\"\n Attempt to join any existing scaler threads that may have died or finished. This insures\n any exceptions raised in the threads are propagated in a timely fashion.\n \"\"\"\n exception = False\n for scalerThread in [self.preemptableScaler, self.scaler]:\n if scalerThread is not None:\n try:\n scalerThread.join(timeout=0)\n except Exception as e:\n logger.exception(e)\n exception = True\n if exception:\n raise RuntimeError('The cluster scaler has exited due to an exception')\n\n def shutdown(self):\n \"\"\"\n Shutdown the cluster.\n \"\"\"\n self.stop = True\n for scaler in self.preemptableScaler, self.scaler:\n if scaler is not None:\n scaler.join()\n\n def addCompletedJob(self, job, wallTime):\n \"\"\"\n Adds the shape of a completed job to the queue, allowing the scalar to use the last N\n completed jobs in factoring how many nodes are required in the cluster.\n :param toil.job.JobNode job: The memory, core and disk requirements of the completed job\n :param int wallTime: The wall-time taken to complete the job in seconds.\n \"\"\"\n s = Shape(wallTime=wallTime, memory=job.memory, cores=job.cores, disk=job.disk)\n if job.preemptable and self.preemptableScaler is not None:\n self.preemptableScaler.jobShapes.add(s)\n else:\n self.scaler.jobShapes.add(s)\n\n\nclass ScalerThread(ExceptionalThread):\n \"\"\"\n A thread that automatically scales the number of either preemptable or non-preemptable worker\n nodes according to the number of jobs queued and the resource requirements of the last N\n completed jobs.\n The scaling calculation is essentially as follows: Use the RecentJobShapes instance to\n calculate how many nodes, n, can be used to productively compute the last N completed\n jobs. Let M be the number of jobs issued to the batch system. The number of nodes\n required is then estimated to be alpha * n * M/N, where alpha is a scaling factor used to\n adjust the balance between under- and over- provisioning the cluster.\n At each scaling decision point a comparison between the current, C, and newly estimated\n number of nodes is made. If the absolute difference is less than beta * C then no change\n is made, else the size of the cluster is adapted. 
The beta factor is an inertia parameter\n that prevents continual fluctuations in the number of nodes.\n \"\"\"\n def __init__(self, scaler, preemptable):\n \"\"\"\n :param ClusterScaler scaler: the parent class\n \"\"\"\n super(ScalerThread, self).__init__(name='preemptable-scaler' if preemptable else 'scaler')\n self.scaler = scaler\n self.preemptable = preemptable\n self.nodeTypeString = (\"preemptable\" if self.preemptable else \"non-preemptable\") + \" nodes\" # Used for logging\n # Resource requirements and wall-time of an atomic node allocation\n self.nodeShape = scaler.provisioner.getNodeShape(preemptable=preemptable)\n # Monitors the requirements of the N most recently completed jobs\n self.jobShapes = RecentJobShapes(scaler.config, self.nodeShape)\n # Minimum/maximum number of either preemptable or non-preemptable nodes in the cluster\n self.minNodes = scaler.config.minPreemptableNodes if preemptable else scaler.config.minNodes\n self.maxNodes = scaler.config.maxPreemptableNodes if preemptable else scaler.config.maxNodes\n if isinstance(self.scaler.leader.batchSystem, AbstractScalableBatchSystem):\n self.totalNodes = len(self.scaler.leader.batchSystem.getNodes(self.preemptable))\n else:\n self.totalNodes = 0\n logger.info('Starting with %s %s(s) in the cluster.', self.totalNodes, self.nodeTypeString)\n \n if scaler.config.clusterStats:\n self.scaler.provisioner.startStats(preemptable=preemptable)\n\n def tryRun(self):\n global _preemptableNodeDeficit\n\n while not self.scaler.stop:\n with throttle(self.scaler.config.scaleInterval):\n # Estimate the number of nodes to run the issued jobs.\n \n # Number of jobs issued\n queueSize = self.scaler.leader.getNumberOfJobsIssued(preemptable=self.preemptable)\n \n # Job shapes of completed jobs\n recentJobShapes = self.jobShapes.get()\n assert len(recentJobShapes) > 0\n \n # Estimate of number of nodes needed to run recent jobs\n nodesToRunRecentJobs = binPacking(recentJobShapes, self.nodeShape)\n \n # Actual calculation of the estimated number of nodes required\n estimatedNodes = 0 if queueSize == 0 else max(1, int(round(\n self.scaler.config.alphaPacking\n * nodesToRunRecentJobs\n * float(queueSize) / len(recentJobShapes))))\n \n # Account for case where the average historical runtime of completed jobs is less\n # than the runtime of currently running jobs. This is important\n # to avoid a deadlock where the estimated number of nodes to run the jobs\n # is too small to schedule a set service jobs and their dependent jobs, leading\n # to service jobs running indefinitely.\n \n # How many jobs are currently running and their average runtime.\n numberOfRunningJobs, currentAvgRuntime = self.scaler.leader.getNumberAndAvgRuntimeOfCurrentlyRunningJobs()\n \n # Average runtime of recently completed jobs\n historicalAvgRuntime = sum(map(lambda jS : jS.wallTime, recentJobShapes))\n \n # Ratio of avg. runtime of currently running and completed jobs\n runtimeCorrection = float(currentAvgRuntime)/historicalAvgRuntime if currentAvgRuntime > historicalAvgRuntime and numberOfRunningJobs >= estimatedNodes else 1.0\n \n # Make correction, if necessary (only do so if cluster is busy and average runtime is higher than historical\n # average)\n if runtimeCorrection != 1.0:\n logger.warn(\"Historical avg. runtime (%s) is less than current avg. 
runtime (%s) and cluster\"\n \" is being well utilised (%s running jobs), increasing cluster requirement by: %s\" % \n (historicalAvgRuntime, currentAvgRuntime, numberOfRunningJobs, runtimeCorrection))\n estimatedNodes *= runtimeCorrection\n\n # If we're the non-preemptable scaler, we need to see if we have a deficit of\n # preemptable nodes that we should compensate for.\n if not self.preemptable:\n compensation = self.scaler.config.preemptableCompensation\n assert 0.0 <= compensation <= 1.0\n # The number of nodes we provision as compensation for missing preemptable\n # nodes is the product of the deficit (the number of preemptable nodes we did\n # _not_ allocate) and configuration preference.\n compensationNodes = int(round(_preemptableNodeDeficit * compensation))\n logger.info('Adding %d preemptable nodes to compensate for a deficit of %d '\n 'non-preemptable ones.', compensationNodes, _preemptableNodeDeficit)\n estimatedNodes += compensationNodes\n\n fix_my_name = (0 if nodesToRunRecentJobs <= 0\n else len(recentJobShapes) / float(nodesToRunRecentJobs))\n logger.info('Estimating that cluster needs %s %s of shape %s, from current '\n 'size of %s, given a queue size of %s, the number of jobs per node '\n 'estimated to be %s, an alpha parameter of %s and a run-time length correction of %s.',\n estimatedNodes, self.nodeTypeString, self.nodeShape, \n self.totalNodes, queueSize, fix_my_name,\n self.scaler.config.alphaPacking, runtimeCorrection)\n\n # Use inertia parameter to stop small fluctuations\n if estimatedNodes <= self.totalNodes * self.scaler.config.betaInertia <= estimatedNodes:\n logger.debug('Difference in new (%s) and previous estimates in number of '\n '%s (%s) required is within beta (%s), making no change.',\n estimatedNodes, self.nodeTypeString, self.totalNodes, self.scaler.config.betaInertia)\n estimatedNodes = self.totalNodes\n\n # Bound number using the max and min node parameters\n if estimatedNodes > self.maxNodes:\n logger.info('Limiting the estimated number of necessary %s (%s) to the '\n 'configured maximum (%s).', self.nodeTypeString, estimatedNodes, self.maxNodes)\n estimatedNodes = self.maxNodes\n elif estimatedNodes < self.minNodes:\n logger.info('Raising the estimated number of necessary %s (%s) to the '\n 'configured mininimum (%s).', self.nodeTypeString, estimatedNodes, self.minNodes)\n estimatedNodes = self.minNodes\n\n if estimatedNodes != self.totalNodes:\n logger.info('Changing the number of %s from %s to %s.', self.nodeTypeString, self.totalNodes,\n estimatedNodes)\n self.totalNodes = self.scaler.provisioner.setNodeCount(numNodes=estimatedNodes,\n preemptable=self.preemptable)\n \n # If we were scaling up the number of preemptable nodes and failed to meet\n # our target, we need to update the slack so that non-preemptable nodes will\n # be allocated instead and we won't block. If we _did_ meet our target,\n # we need to reset the slack to 0.\n if self.preemptable:\n if self.totalNodes < estimatedNodes:\n deficit = estimatedNodes - self.totalNodes\n logger.info('Preemptable scaler detected deficit of %d nodes.', deficit)\n _preemptableNodeDeficit = deficit\n else:\n _preemptableNodeDeficit = 0\n\n self.scaler.provisioner.checkStats()\n \n self.scaler.provisioner.shutDown(preemptable=self.preemptable)\n logger.info('Scaler exited normally.')\n",
"path": "src/toil/provisioners/clusterScaler.py"
}
] | diff --git a/src/toil/provisioners/clusterScaler.py b/src/toil/provisioners/clusterScaler.py
index 009989f56b..4facdbb5fd 100644
--- a/src/toil/provisioners/clusterScaler.py
+++ b/src/toil/provisioners/clusterScaler.py
@@ -259,7 +259,7 @@ def shutdown(self):
self.stop = True
for scaler in self.preemptableScaler, self.scaler:
if scaler is not None:
- self.scaler.join()
+ scaler.join()
def addCompletedJob(self, job, wallTime):
"""
|
django-cms__django-filer-1116 | Uncaught ReferenceError: django is not defined
When I try to create a new instance of a model that has a `FilerImageField` through the content creation wizard, I get an `Uncaught ReferenceError: django is not defined` at:

    VM1967 dropzone.init.js:11
    VM1969 popup_handling.js:46
    VM1970 widget.js:4
    (index):155
Why do I get this and how can I avoid it?
### Reproducible with:
#### modify `aldryn_newsblog/cms_wizards.py`:

    class CreateNewsBlogArticleForm(BaseFormMixin, TranslatableModelForm):
        # ...
        class Meta:
            model = Article
            fields = ['title', 'app_config', 'featured_image']
        # ...
#### or: create a test `foo` app:
My `models.py` file:

    from django.db import models
    from django.core.urlresolvers import reverse
    from filer.fields.image import FilerImageField


    class Foo(models.Model):
        slug = models.SlugField(unique=True)
        image = FilerImageField(related_name='foo_picture', null=True)

        def get_absolute_url(self):
            return reverse('foo', kwargs={'slug': self.slug})
My `forms.py` file:

    from django import forms
    from .models import Foo


    class FooWizardForm(forms.ModelForm):
        class Meta:
            model = Foo
            exclude = []
My `cms_wizards.py` file:

    from cms.wizards.wizard_base import Wizard
    from cms.wizards.wizard_pool import wizard_pool
    from .forms import FooWizardForm


    class FooWizard(Wizard):
        pass


    foo_wizard = FooWizard(
        title="Foo",
        weight=200,
        form=FooWizardForm,
        description="Create a new Foo"
    )

    wizard_pool.register(foo_wizard)
My `views.py` file:

    from django.views.generic import DetailView
    from .models import Foo


    class FooView(DetailView):
        model = Foo
My `urls.py` file:

    from foo.views import FooView
    from django.conf import settings
    from django.contrib import admin
    from django.conf.urls import url, include
    from django.conf.urls.static import static

    urlpatterns = [
        url(r'^admin/', admin.site.urls),
        url(r'^foo/(?P<slug>[-\w]+)/$', FooView.as_view(), name='foo'),
        url(r'^', include('cms.urls'))
    ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
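
The filer widget's JavaScript (`dropzone.init.js`, `popup_handling.js`, `widget.js`) expects the admin's `django.jQuery` namespace to already be on the page; on a regular admin change form it is, but in the CMS creation wizard it is not, hence the `ReferenceError`. A sketch of the workaround that was eventually merged (see the diff below), assuming Django's bundled jQuery lives under `admin/js/` as in Django 1.9+, is to have the widget's `Media` load it first:

```python
class Media(object):
    css = {
        'all': ['filer/css/admin_filer.css'],
    }
    js = (
        'admin/js/vendor/jquery/jquery.js',  # Django's bundled jQuery
        'admin/js/jquery.init.js',           # exposes it as django.jQuery for the add-ons below
        'filer/js/libs/dropzone.min.js',
        'filer/js/addons/dropzone.init.js',
        'filer/js/addons/popup_handling.js',
        'filer/js/addons/widget.js',
    )
```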
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom django import forms\nfrom django.contrib.admin.sites import site\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import mark_safe\n\nfrom .. import settings as filer_settings\nfrom ..models import File\nfrom ..utils.compatibility import LTE_DJANGO_1_8, reverse, truncate_words\nfrom ..utils.model_label import get_model_label\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdminFileWidget(ForeignKeyRawIdWidget):\n choices = None\n\n def render(self, name, value, attrs=None):\n obj = self.obj_for_value(value)\n css_id = attrs.get('id', 'id_image_x')\n related_url = None\n if value:\n try:\n file_obj = File.objects.get(pk=value)\n related_url = file_obj.logical_folder.get_admin_directory_listing_url_path()\n except Exception as e:\n # catch exception and manage it. We can re-raise it for debugging\n # purposes and/or just logging it, provided user configured\n # proper logging configuration\n if filer_settings.FILER_ENABLE_LOGGING:\n logger.error('Error while rendering file widget: %s', e)\n if filer_settings.FILER_DEBUG:\n raise\n if not related_url:\n related_url = reverse('admin:filer-directory_listing-last')\n params = self.url_parameters()\n params['_pick'] = 'file'\n if params:\n lookup_url = '?' + urlencode(sorted(params.items()))\n else:\n lookup_url = ''\n if 'class' not in attrs:\n # The JavaScript looks for this hook.\n attrs['class'] = 'vForeignKeyRawIdAdminField'\n # rendering the super for ForeignKeyRawIdWidget on purpose here because\n # we only need the input and none of the other stuff that\n # ForeignKeyRawIdWidget adds\n hidden_input = super(ForeignKeyRawIdWidget, self).render(name, value, attrs)\n context = {\n 'hidden_input': hidden_input,\n 'lookup_url': '%s%s' % (related_url, lookup_url),\n 'object': obj,\n 'lookup_name': name,\n 'id': css_id,\n 'admin_icon_delete': (\n 'admin/img/icon_deletelink.gif' if LTE_DJANGO_1_8\n else 'admin/img/icon-deletelink.svg'\n ),\n }\n html = render_to_string('admin/filer/widgets/admin_file.html', context)\n return mark_safe(html)\n\n def label_for_value(self, value):\n obj = self.obj_for_value(value)\n return ' <strong>%s</strong>' % truncate_words(obj, 14)\n\n def obj_for_value(self, value):\n try:\n key = self.rel.get_related_field().name\n obj = self.rel.to._default_manager.get(**{key: value})\n except:\n obj = None\n return obj\n\n class Media(object):\n css = {\n 'all': [\n 'filer/css/admin_filer.css',\n ]\n }\n js = (\n 'filer/js/libs/dropzone.min.js',\n 'filer/js/addons/dropzone.init.js',\n 'filer/js/addons/popup_handling.js',\n 'filer/js/addons/widget.js',\n )\n\n\nclass AdminFileFormField(forms.ModelChoiceField):\n widget = AdminFileWidget\n\n def __init__(self, rel, queryset, to_field_name, *args, **kwargs):\n self.rel = rel\n self.queryset = queryset\n self.to_field_name = to_field_name\n self.max_value = None\n self.min_value = None\n kwargs.pop('widget', None)\n super(AdminFileFormField, self).__init__(queryset, widget=self.widget(rel, site), *args, **kwargs)\n\n def widget_attrs(self, widget):\n widget.required = self.required\n return {}\n\n\nclass FilerFileField(models.ForeignKey):\n default_form_class = AdminFileFormField\n default_model_class = File\n\n def __init__(self, **kwargs):\n # We hard-code the `to` argument for 
ForeignKey.__init__\n dfl = get_model_label(self.default_model_class)\n if \"to\" in kwargs.keys(): # pragma: no cover\n old_to = get_model_label(kwargs.pop(\"to\"))\n if old_to != dfl:\n msg = \"%s can only be a ForeignKey to %s; %s passed\" % (\n self.__class__.__name__, dfl, old_to\n )\n warnings.warn(msg, SyntaxWarning)\n kwargs['to'] = dfl\n super(FilerFileField, self).__init__(**kwargs)\n\n def formfield(self, **kwargs):\n # This is a fairly standard way to set up some defaults\n # while letting the caller override them.\n defaults = {\n 'form_class': self.default_form_class,\n }\n try:\n defaults['rel'] = self.remote_field\n except AttributeError:\n defaults['rel'] = self.rel\n defaults.update(kwargs)\n return super(FilerFileField, self).formfield(**defaults)\n",
"path": "filer/fields/file.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom django import forms\nfrom django.contrib.admin.sites import site\nfrom django.contrib.admin.widgets import ForeignKeyRawIdWidget\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import mark_safe\n\nfrom .. import settings as filer_settings\nfrom ..models import File\nfrom ..utils.compatibility import LTE_DJANGO_1_8, reverse, truncate_words\nfrom ..utils.model_label import get_model_label\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdminFileWidget(ForeignKeyRawIdWidget):\n choices = None\n\n def render(self, name, value, attrs=None):\n obj = self.obj_for_value(value)\n css_id = attrs.get('id', 'id_image_x')\n related_url = None\n if value:\n try:\n file_obj = File.objects.get(pk=value)\n related_url = file_obj.logical_folder.get_admin_directory_listing_url_path()\n except Exception as e:\n # catch exception and manage it. We can re-raise it for debugging\n # purposes and/or just logging it, provided user configured\n # proper logging configuration\n if filer_settings.FILER_ENABLE_LOGGING:\n logger.error('Error while rendering file widget: %s', e)\n if filer_settings.FILER_DEBUG:\n raise\n if not related_url:\n related_url = reverse('admin:filer-directory_listing-last')\n params = self.url_parameters()\n params['_pick'] = 'file'\n if params:\n lookup_url = '?' + urlencode(sorted(params.items()))\n else:\n lookup_url = ''\n if 'class' not in attrs:\n # The JavaScript looks for this hook.\n attrs['class'] = 'vForeignKeyRawIdAdminField'\n # rendering the super for ForeignKeyRawIdWidget on purpose here because\n # we only need the input and none of the other stuff that\n # ForeignKeyRawIdWidget adds\n hidden_input = super(ForeignKeyRawIdWidget, self).render(name, value, attrs)\n context = {\n 'hidden_input': hidden_input,\n 'lookup_url': '%s%s' % (related_url, lookup_url),\n 'object': obj,\n 'lookup_name': name,\n 'id': css_id,\n 'admin_icon_delete': (\n 'admin/img/icon_deletelink.gif' if LTE_DJANGO_1_8\n else 'admin/img/icon-deletelink.svg'\n ),\n }\n html = render_to_string('admin/filer/widgets/admin_file.html', context)\n return mark_safe(html)\n\n def label_for_value(self, value):\n obj = self.obj_for_value(value)\n return ' <strong>%s</strong>' % truncate_words(obj, 14)\n\n def obj_for_value(self, value):\n try:\n key = self.rel.get_related_field().name\n obj = self.rel.to._default_manager.get(**{key: value})\n except:\n obj = None\n return obj\n\n class Media(object):\n css = {\n 'all': [\n 'filer/css/admin_filer.css',\n ]\n }\n js = (\n 'admin/js/vendor/jquery/jquery.js',\n 'admin/js/jquery.init.js',\n 'filer/js/libs/dropzone.min.js',\n 'filer/js/addons/dropzone.init.js',\n 'filer/js/addons/popup_handling.js',\n 'filer/js/addons/widget.js',\n )\n\n\nclass AdminFileFormField(forms.ModelChoiceField):\n widget = AdminFileWidget\n\n def __init__(self, rel, queryset, to_field_name, *args, **kwargs):\n self.rel = rel\n self.queryset = queryset\n self.to_field_name = to_field_name\n self.max_value = None\n self.min_value = None\n kwargs.pop('widget', None)\n super(AdminFileFormField, self).__init__(queryset, widget=self.widget(rel, site), *args, **kwargs)\n\n def widget_attrs(self, widget):\n widget.required = self.required\n return {}\n\n\nclass FilerFileField(models.ForeignKey):\n default_form_class = AdminFileFormField\n default_model_class = File\n\n def 
__init__(self, **kwargs):\n # We hard-code the `to` argument for ForeignKey.__init__\n dfl = get_model_label(self.default_model_class)\n if \"to\" in kwargs.keys(): # pragma: no cover\n old_to = get_model_label(kwargs.pop(\"to\"))\n if old_to != dfl:\n msg = \"%s can only be a ForeignKey to %s; %s passed\" % (\n self.__class__.__name__, dfl, old_to\n )\n warnings.warn(msg, SyntaxWarning)\n kwargs['to'] = dfl\n super(FilerFileField, self).__init__(**kwargs)\n\n def formfield(self, **kwargs):\n # This is a fairly standard way to set up some defaults\n # while letting the caller override them.\n defaults = {\n 'form_class': self.default_form_class,\n }\n try:\n defaults['rel'] = self.remote_field\n except AttributeError:\n defaults['rel'] = self.rel\n defaults.update(kwargs)\n return super(FilerFileField, self).formfield(**defaults)\n",
"path": "filer/fields/file.py"
}
] | diff --git a/filer/fields/file.py b/filer/fields/file.py
index dd336e27f..156184182 100644
--- a/filer/fields/file.py
+++ b/filer/fields/file.py
@@ -87,6 +87,8 @@ class Media(object):
]
}
js = (
+ 'admin/js/vendor/jquery/jquery.js',
+ 'admin/js/jquery.init.js',
'filer/js/libs/dropzone.min.js',
'filer/js/addons/dropzone.init.js',
'filer/js/addons/popup_handling.js',
|
Kinto__kinto-1786 | Remove colander deprecations
```
/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.6/site-packages/cornice/validators/_colander.py:110: DeprecationWarning: Setting schema to a class is deprecated. Set schema to an instance instead.
schema = _ensure_instantiated(schema)
```
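
The warning is emitted because the Cornice view is registered with the schema *class*, while newer colander/cornice expect an *instance*. A sketch of the adjustment in `kinto/core/views/batch.py` (a fragment of the existing module, mirroring the fix in the diff below):

```python
@batch.post(schema=BatchRequest(),  # instantiate the schema instead of passing the class
            validators=(colander_validator,),
            content_type=CONTENT_TYPES,
            permission=NO_PERMISSION_REQUIRED,
            tags=['Batch'], operation_id='batch',
            response_schemas=batch_responses)
def post_batch(request):
    ...
```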
| [
{
"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n 
validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n",
"path": "kinto/core/views/batch.py"
}
] | [
{
"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest(),\n 
validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n",
"path": "kinto/core/views/batch.py"
}
] | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 6c398da4a..bffca1486 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -3,6 +3,14 @@ Changelog
This document describes changes between each past release.
+10.1.2 (2018-10-02)
+-------------------
+
+**Bug fixes**
+
+- Set schema to an instance instead of class (fixes #1781)
+
+
10.1.2 (2018-09-28)
-------------------
diff --git a/CONTRIBUTORS.rst b/CONTRIBUTORS.rst
index e3d3850ba..707d839db 100644
--- a/CONTRIBUTORS.rst
+++ b/CONTRIBUTORS.rst
@@ -32,6 +32,7 @@ Contributors
* FooBarQuaxx
* Greeshma <[email protected]>
* Gabriela Surita <[email protected]>
+* George Smith <[email protected]>
* Greg Guthe <[email protected]>
* Heron Rossi <[email protected]>
* Hiromipaw <[email protected]>
diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
index d61eb08e8..83f444a25 100644
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -111,7 +111,7 @@ class ErrorResponseSchema(colander.MappingSchema):
description='Batch operations')
[email protected](schema=BatchRequest,
[email protected](schema=BatchRequest(),
validators=(colander_validator,),
content_type=CONTENT_TYPES,
permission=NO_PERMISSION_REQUIRED,
|
mlflow__mlflow-1788 | [BUG] Kubernetes Projects cannot push to private Docker repositories
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: OSX 10.14
- **MLflow installed from (source or binary)**: PyPI
- **MLflow version (run ``mlflow --version``)**: 1.1.0
- **Python version**: 2.7
- **npm version, if running the dev UI**:
- **Exact command to reproduce**: See below
### Describe the problem
When executing an MLflow Project on Kubernetes, MLflow builds a docker image containing the project's contents and attempts to push it to a Docker repository specified by the backend configuration file (see https://mlflow.org/docs/latest/projects.html#execution-guide). When the Docker repository is a private repository, MLflow fails to push the Docker image. This failure occurs even if the user has authenticated with the Docker repository in their shell via `docker login` or provided access credentials in the `~/.docker/config.json` file.
### Code to reproduce issue
The following steps reproduce the issue using the [mlflow/examples/docker example](https://github.com/mlflow/mlflow/tree/master/examples/docker).
1. Clone the MLflow repository and check out `master`.
2. Specify a private Docker repository in the [mlflow/examples/docker/kubernetes_config.json file](https://github.com/mlflow/mlflow/blob/master/examples/docker/kubernetes_config.json). For example, I used the repository `dbczumar/mlflow-k8s-test`:
```
{
"kube-context": "docker-for-desktop",
"kube-job-template-path": "examples/docker/kubernetes_job_template.yaml",
"repository-uri": "dbczumar/mlflow-k8s-test"
}
```
3. Authenticate with the private Docker repository in your shell (either via `docker login` or by providing credentials in your `~/.docker/config.json` file). Confirm that you can push images to this repository.
4. In the same shell, navigate to the root directory of your MLflow repository and run the following command:
```sh
$ mlflow run examples/docker --backend kubernetes --backend-config examples/docker/kubernetes_config.json -P alpha=0.5
```
5. Observe that the Docker image for the Project builds successfully, but the push process to the private repository fails with a 500-level error:
```
2019/07/26 16:38:23 INFO mlflow.projects: === Building docker image dbczumar/mlflow-k8s-test:96eb9e5 ===
2019/07/26 16:38:30 INFO mlflow.projects.kubernetes: === Pushing docker image dbczumar/mlflow-k8s-test:96eb9e5 ===
Traceback (most recent call last):
File "/Users/czumar/anaconda2/bin/mlflow", line 11, in <module>
load_entry_point('mlflow', 'console_scripts', 'mlflow')()
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/czumar/mlflow/mlflow/cli.py", line 137, in run
run_id=run_id
File "/Users/czumar/mlflow/mlflow/projects/__init__.py", line 265, in run
use_conda=use_conda, storage_dir=storage_dir, synchronous=synchronous, run_id=run_id)
File "/Users/czumar/mlflow/mlflow/projects/__init__.py", line 171, in _run
image_digest = kb.push_image_to_registry(image.tags[0])
File "/Users/czumar/mlflow/mlflow/projects/kubernetes.py", line 23, in push_image_to_registry
return client.images.get_registry_data(image_tag).id
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/models/images.py", line 333, in get_registry_data
attrs=self.client.api.inspect_distribution(name),
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/utils/decorators.py", line 34, in wrapper
return f(self, *args, **kwargs)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/api/image.py", line 266, in inspect_distribution
self._get(self._url("/distribution/{0}/json", image)), True
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/api/client.py", line 262, in _result
self._raise_for_status(response)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/api/client.py", line 258, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/Users/czumar/anaconda2/lib/python2.7/site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("errors:
denied: requested access to the resource is denied
unauthorized: authentication required
")
```
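The `docker.errors.APIError` above comes from `client.images.get_registry_data()`, which queries the registry's `/distribution` endpoint; the fix merged for this issue (see the diff below) simply requires `docker>=4.0.0`, presumably because newer docker SDK versions resolve local registry credentials for that call. The snippet below is a minimal reproduction sketch, not MLflow code; the repository URI and tag are taken from the report above, and everything else is an assumption.

```python
import docker

# Hedged sketch: push a project image and then query the registry, the same
# two steps MLflow performs. The repository/tag names come from the report.
client = docker.from_env()
image_tag = "dbczumar/mlflow-k8s-test:96eb9e5"

# Pushing honours `docker login` credentials stored in ~/.docker/config.json.
for line in client.images.push(image_tag, stream=True, decode=True):
    if "error" in line:
        raise RuntimeError(line["error"])

# This is the call seen in the traceback: it hits the /distribution endpoint
# and fails against a private repository with older docker SDK versions.
image_digest = client.images.get_registry_data(image_tag).id
print(image_digest)
```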
| [
{
"content": "import imp\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nversion = imp.load_source(\n 'mlflow.version', os.path.join('mlflow', 'version.py')).VERSION\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files('mlflow/server/js/build')\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\"../mlflow/store/db_migrations/alembic.ini\", \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\"]\n\nsetup(\n name='mlflow',\n version=version,\n packages=find_packages(exclude=['tests', 'tests.*']),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n 'alembic',\n 'click>=7.0',\n 'cloudpickle',\n 'databricks-cli>=0.8.7',\n 'requests>=2.17.3',\n 'six>=1.10.0',\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n 'Flask',\n 'numpy',\n 'pandas',\n 'python-dateutil',\n 'protobuf>=3.6.0',\n 'gitpython>=2.1.0',\n 'pyyaml',\n 'querystring_parser',\n 'simplejson',\n 'docker>=3.6.0',\n 'entrypoints',\n 'sqlparse',\n 'sqlalchemy',\n 'docker>=3.6.0',\n 'gorilla',\n ],\n extras_require={\n 'extras':[\n \"scikit-learn; python_version >= '3.5'\",\n # scikit-learn 0.20 is the last version to support Python 2.x & Python 3.4.\n \"scikit-learn==0.20; python_version < '3.5'\",\n 'boto3>=1.7.12',\n 'mleap>=0.8.1',\n 'azure-storage',\n 'google-cloud-storage',\n ],\n },\n entry_points='''\n [console_scripts]\n mlflow=mlflow.cli:cli\n ''',\n zip_safe=False,\n author='Databricks',\n description='MLflow: An ML Workflow Tool',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='ml ai databricks',\n url='https://mlflow.org/'\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import imp\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nversion = imp.load_source(\n 'mlflow.version', os.path.join('mlflow', 'version.py')).VERSION\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files('mlflow/server/js/build')\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\"../mlflow/store/db_migrations/alembic.ini\", \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\"]\n\nsetup(\n name='mlflow',\n version=version,\n packages=find_packages(exclude=['tests', 'tests.*']),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n 'alembic',\n 'click>=7.0',\n 'cloudpickle',\n 'databricks-cli>=0.8.7',\n 'requests>=2.17.3',\n 'six>=1.10.0',\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n 'Flask',\n 'numpy',\n 'pandas',\n 'python-dateutil',\n 'protobuf>=3.6.0',\n 'gitpython>=2.1.0',\n 'pyyaml',\n 'querystring_parser',\n 'simplejson',\n 'docker>=4.0.0',\n 'entrypoints',\n 'sqlparse',\n 'sqlalchemy',\n 'gorilla',\n ],\n extras_require={\n 'extras':[\n \"scikit-learn; python_version >= '3.5'\",\n # scikit-learn 0.20 is the last version to support Python 2.x & Python 3.4.\n \"scikit-learn==0.20; python_version < '3.5'\",\n 'boto3>=1.7.12',\n 'mleap>=0.8.1',\n 'azure-storage',\n 'google-cloud-storage',\n ],\n },\n entry_points='''\n [console_scripts]\n mlflow=mlflow.cli:cli\n ''',\n zip_safe=False,\n author='Databricks',\n description='MLflow: An ML Workflow Tool',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='ml ai databricks',\n url='https://mlflow.org/'\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 6fb6f5b9f2264..9804e875bde51 100644
--- a/setup.py
+++ b/setup.py
@@ -45,11 +45,10 @@ def package_files(directory):
'pyyaml',
'querystring_parser',
'simplejson',
- 'docker>=3.6.0',
+ 'docker>=4.0.0',
'entrypoints',
'sqlparse',
'sqlalchemy',
- 'docker>=3.6.0',
'gorilla',
],
extras_require={
|
rootpy__rootpy-511 | 'TCanvas' object has no attribute 'name'
Hi,
I am seeing weird issues with the interactive module. It looks like the TCanvas is not 'decorated' when loading rootpy.interactive.
```
>>> from ROOT import *
>>> t = TCanvas()
>>> from rootpy.interactive import wait
/usr/local/lib/python2.7/site-packages/IPython/frontend.py:30: UserWarning: The top-level `frontend` package has been deprecated. All its subpackages have been moved to the top `IPython` level.
warn("The top-level `frontend` package has been deprecated. "
w>>> wait()
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/local/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/rootpy-dev-py2.7.egg/rootpy/interactive/rootwait.py", line 206, in wait_for_zero_canvases
log.debug("waiting for canvas {0} to close".format(canvas.name))
AttributeError: 'TCanvas' object has no attribute 'name'
```
Albert
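The `AttributeError` occurs because the canvas in this session was created with plain `ROOT.TCanvas`, so it never received rootpy's decorated lowercase `name` property; the fix in the diff below switches the log call to `GetName()`, which every `TNamed` subclass provides. A small illustrative sketch (not rootpy code), assuming only stock PyROOT:

```python
# Illustrative sketch only: a TCanvas created outside rootpy has no lowercase
# `name` property, but GetName() is always available via TNamed.
import ROOT

c = ROOT.TCanvas("c1", "demo canvas")

print(c.GetName())   # -> "c1", works on any TCanvas

try:
    print(c.name)    # only exists when rootpy has decorated the class
except AttributeError:
    print("plain TCanvas has no `name` attribute")
```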
| [
{
"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThe functions in this module provide a way of pausing code execution until\ncanvases are closed. This can be useful when testing code and you don't want to\nkeep the objects alive outside of your function.\n\nThe wait function can be called repeatedly to pause multiple times.\n\nwait_for_zero_canvases()\n Keeps root alive until CTRL-c is pressed or all canvases are closed\n\nwait_for_zero_canvases(middle_mouse_close=True)\n allows canvases to be closed with the middle mouse button (see below)\n\nwait is shorthand for wait_for_zero_canvases\n\nExamples\n--------\n\n from rootpy.plotting import Canvas\n from rootpy.interactive import wait\n\n c = Canvas()\n c.Update()\n wait()\n\n c2 = Canvas()\n c2.Update()\n wait(True)\n # This canvas can be killed by middle clicking on it or hitting\n # escape whilst it has focus\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport threading\nfrom atexit import register\n\nimport ROOT\n\nfrom . import log; log = log[__name__]\nfrom ..defaults import extra_initialization\nfrom ..memory.keepalive import keepalive\nfrom .canvas_events import attach_event_handler\n\n__all__ = [\n 'wait_for_zero_canvases',\n 'wait_for_browser_close',\n 'wait',\n]\n\n_processRootEvents = None\n_finishSchedule = None\n__ACTIVE = False\n\n\n@extra_initialization\ndef fetch_vars():\n global _processRootEvents, _finishSchedule, __ACTIVE\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n if PyGUIThread is not None:\n _processRootEvents = getattr(PyGUIThread, \"_Thread__target\", None)\n _finishSchedule = getattr(PyGUIThread, \"finishSchedule\", None)\n if _processRootEvents is None:\n log.warning(\n \"unable to access ROOT's GUI thread either because \"\n \"PyROOT's finalSetup() was called while in batch mode \"\n \"or because PyROOT is using the new PyOS_InputHook \"\n \"based mechanism that is not yet supported in rootpy \"\n \"(PyConfig.StartGuiThread == 'inputhook' or \"\n \"gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. will \"\n \"instead call raw_input() and wait for [Enter]\")\n else:\n __ACTIVE = True\n\n\ndef wait_failover(caller):\n if not ROOT.gROOT.IsBatch():\n log.warning(\n \"{0} is failing over to raw_input()\".format(caller.__name__))\n raw_input(\"press [Enter] to continue\")\n\n\ndef start_new_gui_thread():\n \"\"\"\n Attempt to start a new GUI thread, if possible.\n\n It is only possible to start one if there was one running on module import.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is not None:\n assert not PyGUIThread.isAlive(), \"GUI thread already running!\"\n\n assert _processRootEvents, (\n \"GUI thread wasn't started when rootwait was imported, \"\n \"so it can't be restarted\")\n\n ROOT.keeppolling = 1\n ROOT.PyGUIThread = threading.Thread(\n None, _processRootEvents, None, (ROOT,))\n\n ROOT.PyGUIThread.finishSchedule = _finishSchedule\n ROOT.PyGUIThread.setDaemon(1)\n ROOT.PyGUIThread.start()\n log.debug(\"successfully started a new GUI thread\")\n\n\ndef stop_gui_thread():\n \"\"\"\n Try to stop the GUI thread. 
If it was running returns True,\n otherwise False.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is None or not PyGUIThread.isAlive():\n log.debug(\"no existing GUI thread is runnng\")\n return False\n\n ROOT.keeppolling = 0\n try:\n PyGUIThread.finishSchedule()\n except AttributeError:\n log.debug(\"unable to call finishSchedule() on PyGUIThread\")\n pass\n PyGUIThread.join()\n log.debug(\"successfully stopped the existing GUI thread\")\n return True\n\n\ndef get_visible_canvases():\n \"\"\"\n Return a list of active GUI canvases\n (as opposed to invisible Batch canvases)\n \"\"\"\n try:\n return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n return []\n\n\ndef run_application_until_done():\n\n had_gui_thread = stop_gui_thread()\n\n ROOT.gApplication._threaded = True\n ROOT.gApplication.Run(True)\n\n if had_gui_thread:\n start_new_gui_thread()\n\n\ndef dispatcher(f):\n disp = ROOT.TPyDispatcher(f)\n keepalive(disp, f)\n return disp\n\n\ndef wait_for_zero_canvases(middle_mouse_close=False):\n \"\"\"\n Wait for all canvases to be closed, or CTRL-c.\n\n If `middle_mouse_close`, middle click will shut the canvas.\n\n incpy.ignore\n \"\"\"\n if not __ACTIVE:\n wait_failover(wait_for_zero_canvases)\n return\n\n @dispatcher\n def count_canvases():\n \"\"\"\n Count the number of active canvases and finish gApplication.Run()\n if there are none remaining.\n\n incpy.ignore\n \"\"\"\n if not get_visible_canvases():\n try:\n ROOT.gSystem.ExitLoop()\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n pass\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n visible_canvases = get_visible_canvases()\n\n for canvas in visible_canvases:\n log.debug(\"waiting for canvas {0} to close\".format(canvas.name))\n canvas.Update()\n\n if middle_mouse_close:\n attach_event_handler(canvas)\n\n if not getattr(canvas, \"_py_close_dispatcher_attached\", False):\n # Attach a handler only once to each canvas\n canvas._py_close_dispatcher_attached = True\n canvas.Connect(\"Closed()\", \"TPyDispatcher\",\n count_canvases, \"Dispatch()\")\n keepalive(canvas, count_canvases)\n\n if visible_canvases and not ROOT.gROOT.IsBatch():\n run_application_until_done()\n\n # Disconnect from canvases\n for canvas in visible_canvases:\n if getattr(canvas, \"_py_close_dispatcher_attached\", False):\n canvas._py_close_dispatcher_attached = False\n canvas.Disconnect(\"Closed()\", count_canvases, \"Dispatch()\")\n\nwait = wait_for_zero_canvases\n\n\ndef wait_for_frame(frame):\n \"\"\"\n wait until a TGMainFrame is closed or ctrl-c\n \"\"\"\n if not frame:\n # It's already closed or maybe we're in batch mode\n return\n\n @dispatcher\n def close():\n ROOT.gSystem.ExitLoop()\n\n if not getattr(frame, \"_py_close_dispatcher_attached\", False):\n frame._py_close_dispatcher_attached = True\n frame.Connect(\"CloseWindow()\", \"TPyDispatcher\", close, \"Dispatch()\")\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = 
ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n if not ROOT.gROOT.IsBatch():\n run_application_until_done()\n # Need to disconnect to prevent close handler from running when python\n # teardown has already commenced.\n frame.Disconnect(\"CloseWindow()\", close, \"Dispatch()\")\n\n\ndef wait_for_browser_close(b):\n \"\"\"\n Can be used to wait until a TBrowser is closed\n \"\"\"\n if b:\n if not __ACTIVE:\n wait_failover(wait_for_browser_close)\n return\n wait_for_frame(b.GetBrowserImp().GetMainFrame())\n\n\ndef prevent_close_with_canvases():\n \"\"\"\n Register a handler which prevents python from exiting until\n all canvases are closed\n \"\"\"\n register(wait_for_zero_canvases)\n",
"path": "rootpy/interactive/rootwait.py"
}
] | [
{
"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThe functions in this module provide a way of pausing code execution until\ncanvases are closed. This can be useful when testing code and you don't want to\nkeep the objects alive outside of your function.\n\nThe wait function can be called repeatedly to pause multiple times.\n\nwait_for_zero_canvases()\n Keeps root alive until CTRL-c is pressed or all canvases are closed\n\nwait_for_zero_canvases(middle_mouse_close=True)\n allows canvases to be closed with the middle mouse button (see below)\n\nwait is shorthand for wait_for_zero_canvases\n\nExamples\n--------\n\n from rootpy.plotting import Canvas\n from rootpy.interactive import wait\n\n c = Canvas()\n c.Update()\n wait()\n\n c2 = Canvas()\n c2.Update()\n wait(True)\n # This canvas can be killed by middle clicking on it or hitting\n # escape whilst it has focus\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport threading\nfrom atexit import register\n\nimport ROOT\n\nfrom . import log; log = log[__name__]\nfrom ..defaults import extra_initialization\nfrom ..memory.keepalive import keepalive\nfrom .canvas_events import attach_event_handler\n\n__all__ = [\n 'wait_for_zero_canvases',\n 'wait_for_browser_close',\n 'wait',\n]\n\n_processRootEvents = None\n_finishSchedule = None\n__ACTIVE = False\n\n\n@extra_initialization\ndef fetch_vars():\n global _processRootEvents, _finishSchedule, __ACTIVE\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n if PyGUIThread is not None:\n _processRootEvents = getattr(PyGUIThread, \"_Thread__target\", None)\n _finishSchedule = getattr(PyGUIThread, \"finishSchedule\", None)\n if _processRootEvents is None:\n log.warning(\n \"unable to access ROOT's GUI thread either because \"\n \"PyROOT's finalSetup() was called while in batch mode \"\n \"or because PyROOT is using the new PyOS_InputHook \"\n \"based mechanism that is not yet supported in rootpy \"\n \"(PyConfig.StartGuiThread == 'inputhook' or \"\n \"gSystem.InheritsFrom('TMacOSXSystem')). wait() etc. will \"\n \"instead call raw_input() and wait for [Enter]\")\n else:\n __ACTIVE = True\n\n\ndef wait_failover(caller):\n if not ROOT.gROOT.IsBatch():\n log.warning(\n \"{0} is failing over to raw_input()\".format(caller.__name__))\n raw_input(\"press [Enter] to continue\")\n\n\ndef start_new_gui_thread():\n \"\"\"\n Attempt to start a new GUI thread, if possible.\n\n It is only possible to start one if there was one running on module import.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is not None:\n assert not PyGUIThread.isAlive(), \"GUI thread already running!\"\n\n assert _processRootEvents, (\n \"GUI thread wasn't started when rootwait was imported, \"\n \"so it can't be restarted\")\n\n ROOT.keeppolling = 1\n ROOT.PyGUIThread = threading.Thread(\n None, _processRootEvents, None, (ROOT,))\n\n ROOT.PyGUIThread.finishSchedule = _finishSchedule\n ROOT.PyGUIThread.setDaemon(1)\n ROOT.PyGUIThread.start()\n log.debug(\"successfully started a new GUI thread\")\n\n\ndef stop_gui_thread():\n \"\"\"\n Try to stop the GUI thread. 
If it was running returns True,\n otherwise False.\n \"\"\"\n PyGUIThread = getattr(ROOT, 'PyGUIThread', None)\n\n if PyGUIThread is None or not PyGUIThread.isAlive():\n log.debug(\"no existing GUI thread is runnng\")\n return False\n\n ROOT.keeppolling = 0\n try:\n PyGUIThread.finishSchedule()\n except AttributeError:\n log.debug(\"unable to call finishSchedule() on PyGUIThread\")\n pass\n PyGUIThread.join()\n log.debug(\"successfully stopped the existing GUI thread\")\n return True\n\n\ndef get_visible_canvases():\n \"\"\"\n Return a list of active GUI canvases\n (as opposed to invisible Batch canvases)\n \"\"\"\n try:\n return [c for c in ROOT.gROOT.GetListOfCanvases() if not c.IsBatch()]\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n return []\n\n\ndef run_application_until_done():\n\n had_gui_thread = stop_gui_thread()\n\n ROOT.gApplication._threaded = True\n ROOT.gApplication.Run(True)\n\n if had_gui_thread:\n start_new_gui_thread()\n\n\ndef dispatcher(f):\n disp = ROOT.TPyDispatcher(f)\n keepalive(disp, f)\n return disp\n\n\ndef wait_for_zero_canvases(middle_mouse_close=False):\n \"\"\"\n Wait for all canvases to be closed, or CTRL-c.\n\n If `middle_mouse_close`, middle click will shut the canvas.\n\n incpy.ignore\n \"\"\"\n if not __ACTIVE:\n wait_failover(wait_for_zero_canvases)\n return\n\n @dispatcher\n def count_canvases():\n \"\"\"\n Count the number of active canvases and finish gApplication.Run()\n if there are none remaining.\n\n incpy.ignore\n \"\"\"\n if not get_visible_canvases():\n try:\n ROOT.gSystem.ExitLoop()\n except AttributeError:\n # We might be exiting and ROOT.gROOT will raise an AttributeError\n pass\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n visible_canvases = get_visible_canvases()\n\n for canvas in visible_canvases:\n log.debug(\"waiting for canvas {0} to close\".format(canvas.GetName()))\n canvas.Update()\n\n if middle_mouse_close:\n attach_event_handler(canvas)\n\n if not getattr(canvas, \"_py_close_dispatcher_attached\", False):\n # Attach a handler only once to each canvas\n canvas._py_close_dispatcher_attached = True\n canvas.Connect(\"Closed()\", \"TPyDispatcher\",\n count_canvases, \"Dispatch()\")\n keepalive(canvas, count_canvases)\n\n if visible_canvases and not ROOT.gROOT.IsBatch():\n run_application_until_done()\n\n # Disconnect from canvases\n for canvas in visible_canvases:\n if getattr(canvas, \"_py_close_dispatcher_attached\", False):\n canvas._py_close_dispatcher_attached = False\n canvas.Disconnect(\"Closed()\", count_canvases, \"Dispatch()\")\n\nwait = wait_for_zero_canvases\n\n\ndef wait_for_frame(frame):\n \"\"\"\n wait until a TGMainFrame is closed or ctrl-c\n \"\"\"\n if not frame:\n # It's already closed or maybe we're in batch mode\n return\n\n @dispatcher\n def close():\n ROOT.gSystem.ExitLoop()\n\n if not getattr(frame, \"_py_close_dispatcher_attached\", False):\n frame._py_close_dispatcher_attached = True\n frame.Connect(\"CloseWindow()\", \"TPyDispatcher\", close, \"Dispatch()\")\n\n @dispatcher\n def exit_application_loop():\n \"\"\"\n Signal handler for CTRL-c to cause gApplication.Run() to finish.\n\n incpy.ignore\n \"\"\"\n ROOT.gSystem.ExitLoop()\n\n # Handle CTRL-c\n sh = 
ROOT.TSignalHandler(ROOT.kSigInterrupt, True)\n sh.Add()\n sh.Connect(\"Notified()\", \"TPyDispatcher\",\n exit_application_loop, \"Dispatch()\")\n\n if not ROOT.gROOT.IsBatch():\n run_application_until_done()\n # Need to disconnect to prevent close handler from running when python\n # teardown has already commenced.\n frame.Disconnect(\"CloseWindow()\", close, \"Dispatch()\")\n\n\ndef wait_for_browser_close(b):\n \"\"\"\n Can be used to wait until a TBrowser is closed\n \"\"\"\n if b:\n if not __ACTIVE:\n wait_failover(wait_for_browser_close)\n return\n wait_for_frame(b.GetBrowserImp().GetMainFrame())\n\n\ndef prevent_close_with_canvases():\n \"\"\"\n Register a handler which prevents python from exiting until\n all canvases are closed\n \"\"\"\n register(wait_for_zero_canvases)\n",
"path": "rootpy/interactive/rootwait.py"
}
] | diff --git a/rootpy/interactive/rootwait.py b/rootpy/interactive/rootwait.py
index c0a87afe..754687fa 100644
--- a/rootpy/interactive/rootwait.py
+++ b/rootpy/interactive/rootwait.py
@@ -203,7 +203,7 @@ def exit_application_loop():
visible_canvases = get_visible_canvases()
for canvas in visible_canvases:
- log.debug("waiting for canvas {0} to close".format(canvas.name))
+ log.debug("waiting for canvas {0} to close".format(canvas.GetName()))
canvas.Update()
if middle_mouse_close:
|
pydantic__pydantic-1253 | Broken loading list of tuples.
# Bug
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.4
pydantic compiled: True
install path: /home/**removed**/venv/lib/python3.7/site-packages/pydantic
python version: 3.7.3 (default, Apr 3 2019, 19:16:38) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]
platform: Linux-4.19.86-041986-generic-x86_64-with-Ubuntu-18.04-bionic
optional deps. installed: []
```
There is some strange behaviour when loading a nested list of tuples. At first I thought this might be intended, but then I found that parse_obj and parse_obj_as follow different execution paths, which is confusing.
```py
from typing import List, Tuple

from pydantic import BaseModel, parse_obj_as
class OperationData(BaseModel):
id: str
class Operation(BaseModel):
__root__: Tuple[int, OperationData]
data = [0, {'id': '1.11.0'}]
# this one works as expected
print(Operation.parse_obj(data))
# printed: __root__=(0, OperationData(id='1.11.0'))
# However, this one doesn't
print(parse_obj_as(Operation, data))
# Traceback (most recent call last):
# File "/home/**removed**/protocol/base.py", line 238, in <module>
# print(parse_obj_as(Operation, data))
# File "pydantic/tools.py", line 35, in pydantic.tools.parse_obj_as
# File "pydantic/main.py", line 283, in pydantic.main.BaseModel.__init__
# pydantic.error_wrappers.ValidationError: 1 validation error for ParsingModel[Operation]
#__root__
# value is not a valid dict (type=type_error.dict)
# Which is not a big problem. The problem is that I have a nested class
class OperationsBatch(BaseModel):
batch_desc: str
operations: List[Operation]
# and it produces the same exception on
print(OperationsBatch.parse_obj({'batch_desc': '123', 'operations': [data, data]}))
# Traceback (most recent call last):
# File "/home/**removed**/protocol/base.py", line 243, in <module>
# OperationsBatch.parse_obj({'batch_desc': '123', 'operations': [data, data]})
# File "pydantic/main.py", line 402, in pydantic.main.BaseModel.parse_obj
# File "pydantic/main.py", line 283, in pydantic.main.BaseModel.__init__
# pydantic.error_wrappers.ValidationError: 2 validation errors for OperationsBatch
# operations -> 0
# value is not a valid dict (type=type_error.dict)
# operations -> 1
# value is not a valid dict (type=type_error.dict)
```
This doesn't look like the right behaviour.
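In 1.4, a custom-`__root__` model used as a nested field (or via `parse_obj_as`) is validated through `BaseModel.validate`, which tries `dict(value)` on the raw list and raises "value is not a valid dict" before `parse_obj`'s `__root__` wrapping ever runs (see `validate` in the `main.py` dump below). Until that changes upstream, one hedged workaround sketch is to coerce the raw pairs with `Operation.parse_obj` in a `pre` validator on the outer model; the class names reuse those from the report, while the validator itself is an assumption, not part of the report or of pydantic:

```python
# Hedged workaround sketch for pydantic 1.4, not the upstream fix.
from typing import List, Tuple

from pydantic import BaseModel, validator


class OperationData(BaseModel):
    id: str


class Operation(BaseModel):
    __root__: Tuple[int, OperationData]


class OperationsBatch(BaseModel):
    batch_desc: str
    operations: List[Operation]

    @validator("operations", pre=True)
    def _coerce_operations(cls, v):
        # parse_obj wraps non-dict input under __root__, so it accepts the
        # raw [int, dict] pairs that would otherwise fail dict() coercion.
        return [
            item if isinstance(item, Operation) else Operation.parse_obj(item)
            for item in v
        ]


data = [0, {"id": "1.11.0"}]
print(OperationsBatch.parse_obj({"batch_desc": "123", "operations": [data, data]}))
```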
| [
{
"content": "import json\nimport sys\nimport warnings\nfrom abc import ABCMeta\nfrom copy import deepcopy\nfrom enum import Enum\nfrom functools import partial\nfrom pathlib import Path\nfrom types import FunctionType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n no_type_check,\n)\n\nfrom .class_validators import ROOT_KEY, ValidatorGroup, extract_root_validators, extract_validators, inherit_validators\nfrom .error_wrappers import ErrorWrapper, ValidationError\nfrom .errors import ConfigError, DictError, ExtraError, MissingError\nfrom .fields import SHAPE_MAPPING, ModelField, Undefined\nfrom .json import custom_pydantic_encoder, pydantic_encoder\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .schema import model_schema\nfrom .types import PyObject, StrBytes\nfrom .typing import AnyCallable, AnyType, ForwardRef, is_classvar, resolve_annotations, update_field_forward_refs\nfrom .utils import (\n GetterDict,\n Representation,\n ValueItems,\n generate_model_signature,\n lenient_issubclass,\n sequence_like,\n validate_field_name,\n)\n\nif TYPE_CHECKING:\n from inspect import Signature\n from .class_validators import ValidatorListDict\n from .types import ModelOrDc\n from .typing import CallableGenerator, TupleGenerator, DictStrAny, DictAny, SetStr\n from .typing import AbstractSetIntStr, DictIntStrAny, ReprArgs # noqa: F401\n\n ConfigType = Type['BaseConfig']\n Model = TypeVar('Model', bound='BaseModel')\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n__all__ = 'BaseConfig', 'BaseModel', 'Extra', 'compiled', 'create_model', 'validate_model'\n\n\nclass Extra(str, Enum):\n allow = 'allow'\n ignore = 'ignore'\n forbid = 'forbid'\n\n\nclass BaseConfig:\n title = None\n anystr_strip_whitespace = False\n min_anystr_length = None\n max_anystr_length = None\n validate_all = False\n extra = Extra.ignore\n allow_mutation = True\n allow_population_by_field_name = False\n use_enum_values = False\n fields: Dict[str, Union[str, Dict[str, str]]] = {}\n validate_assignment = False\n error_msg_templates: Dict[str, str] = {}\n arbitrary_types_allowed = False\n orm_mode: bool = False\n getter_dict: Type[GetterDict] = GetterDict\n alias_generator: Optional[Callable[[str], str]] = None\n keep_untouched: Tuple[type, ...] 
= ()\n schema_extra: Union[Dict[str, Any], Callable[[Dict[str, Any]], None]] = {}\n json_loads: Callable[[str], Any] = json.loads\n json_dumps: Callable[..., str] = json.dumps\n json_encoders: Dict[AnyType, AnyCallable] = {}\n\n @classmethod\n def get_field_info(cls, name: str) -> Dict[str, Any]:\n fields_value = cls.fields.get(name)\n\n if isinstance(fields_value, str):\n field_info: Dict[str, Any] = {'alias': fields_value}\n elif isinstance(fields_value, dict):\n field_info = fields_value\n else:\n field_info = {}\n\n if 'alias' in field_info:\n field_info.setdefault('alias_priority', 2)\n\n if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:\n alias = cls.alias_generator(name)\n if not isinstance(alias, str):\n raise TypeError(f'Config.alias_generator must return str, not {type(alias)}')\n field_info.update(alias=alias, alias_priority=1)\n return field_info\n\n @classmethod\n def prepare_field(cls, field: 'ModelField') -> None:\n \"\"\"\n Optional hook to check or modify fields during model creation.\n \"\"\"\n pass\n\n\ndef inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType') -> 'ConfigType':\n if not self_config:\n base_classes = (parent_config,)\n elif self_config == parent_config:\n base_classes = (self_config,)\n else:\n base_classes = self_config, parent_config # type: ignore\n return type('Config', base_classes, {})\n\n\nEXTRA_LINK = 'https://pydantic-docs.helpmanual.io/usage/model_config/'\n\n\ndef prepare_config(config: Type[BaseConfig], cls_name: str) -> None:\n if not isinstance(config.extra, Extra):\n try:\n config.extra = Extra(config.extra)\n except ValueError:\n raise ValueError(f'\"{cls_name}\": {config.extra} is not a valid value for \"extra\"')\n\n if hasattr(config, 'allow_population_by_alias'):\n warnings.warn(\n f'{cls_name}: \"allow_population_by_alias\" is deprecated and replaced by \"allow_population_by_field_name\"',\n DeprecationWarning,\n )\n config.allow_population_by_field_name = config.allow_population_by_alias # type: ignore\n\n if hasattr(config, 'case_insensitive') and any('BaseSettings.Config' in c.__qualname__ for c in config.__mro__):\n warnings.warn(\n f'{cls_name}: \"case_insensitive\" is deprecated on BaseSettings config and replaced by '\n f'\"case_sensitive\" (default False)',\n DeprecationWarning,\n )\n config.case_sensitive = not config.case_insensitive # type: ignore\n\n\ndef is_valid_field(name: str) -> bool:\n if not name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef validate_custom_root_type(fields: Dict[str, ModelField]) -> None:\n if len(fields) > 1:\n raise ValueError('__root__ cannot be mixed with other fields')\n\n\nUNTOUCHED_TYPES = FunctionType, property, type, classmethod, staticmethod\n\n\nclass ModelMetaclass(ABCMeta):\n @no_type_check # noqa C901\n def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901\n fields: Dict[str, ModelField] = {}\n config = BaseConfig\n validators: 'ValidatorListDict' = {}\n fields_defaults: Dict[str, Any] = {}\n\n pre_root_validators, post_root_validators = [], []\n for base in reversed(bases):\n if issubclass(base, BaseModel) and base != BaseModel:\n fields.update(deepcopy(base.__fields__))\n config = inherit_config(base.__config__, config)\n validators = inherit_validators(base.__validators__, validators)\n pre_root_validators += base.__pre_root_validators__\n post_root_validators += base.__post_root_validators__\n\n config = inherit_config(namespace.get('Config'), config)\n validators = inherit_validators(extract_validators(namespace), 
validators)\n vg = ValidatorGroup(validators)\n\n for f in fields.values():\n if not f.required:\n fields_defaults[f.name] = f.default\n\n f.set_config(config)\n extra_validators = vg.get_validators(f.name)\n if extra_validators:\n f.class_validators.update(extra_validators)\n # re-run prepare to add extra validators\n f.populate_validators()\n\n prepare_config(config, name)\n\n class_vars = set()\n if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):\n annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))\n untouched_types = UNTOUCHED_TYPES + config.keep_untouched\n # annotation only fields need to come first in fields\n for ann_name, ann_type in annotations.items():\n if is_classvar(ann_type):\n class_vars.add(ann_name)\n elif is_valid_field(ann_name):\n validate_field_name(bases, ann_name)\n value = namespace.get(ann_name, Undefined)\n if (\n isinstance(value, untouched_types)\n and ann_type != PyObject\n and not lenient_issubclass(getattr(ann_type, '__origin__', None), Type)\n ):\n continue\n fields[ann_name] = inferred = ModelField.infer(\n name=ann_name,\n value=value,\n annotation=ann_type,\n class_validators=vg.get_validators(ann_name),\n config=config,\n )\n if not inferred.required:\n fields_defaults[ann_name] = inferred.default\n\n for var_name, value in namespace.items():\n if (\n var_name not in annotations\n and is_valid_field(var_name)\n and not isinstance(value, untouched_types)\n and var_name not in class_vars\n ):\n validate_field_name(bases, var_name)\n inferred = ModelField.infer(\n name=var_name,\n value=value,\n annotation=annotations.get(var_name),\n class_validators=vg.get_validators(var_name),\n config=config,\n )\n if var_name in fields and inferred.type_ != fields[var_name].type_:\n raise TypeError(\n f'The type of {name}.{var_name} differs from the new default value; '\n f'if you wish to change the type of this field, please use a type annotation'\n )\n fields[var_name] = inferred\n if not inferred.required:\n fields_defaults[var_name] = inferred.default\n\n _custom_root_type = ROOT_KEY in fields\n if _custom_root_type:\n validate_custom_root_type(fields)\n vg.check_for_unused()\n if config.json_encoders:\n json_encoder = partial(custom_pydantic_encoder, config.json_encoders)\n else:\n json_encoder = pydantic_encoder\n pre_rv_new, post_rv_new = extract_root_validators(namespace)\n new_namespace = {\n '__config__': config,\n '__fields__': fields,\n '__field_defaults__': fields_defaults,\n '__validators__': vg.validators,\n '__pre_root_validators__': pre_root_validators + pre_rv_new,\n '__post_root_validators__': post_root_validators + post_rv_new,\n '__schema_cache__': {},\n '__json_encoder__': staticmethod(json_encoder),\n '__custom_root_type__': _custom_root_type,\n **{n: v for n, v in namespace.items() if n not in fields},\n }\n\n cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)\n cls.__signature__ = generate_model_signature(cls.__init__, fields, config)\n return cls\n\n\nclass BaseModel(metaclass=ModelMetaclass):\n if TYPE_CHECKING:\n # populated by the metaclass, defined here to help IDEs only\n __fields__: Dict[str, ModelField] = {}\n __field_defaults__: Dict[str, Any] = {}\n __validators__: Dict[str, AnyCallable] = {}\n __pre_root_validators__: List[AnyCallable]\n __post_root_validators__: List[Tuple[bool, AnyCallable]]\n __config__: Type[BaseConfig] = BaseConfig\n __root__: Any = None\n __json_encoder__: Callable[[Any], Any] = lambda x: x\n 
__schema_cache__: 'DictAny' = {}\n __custom_root_type__: bool = False\n __signature__: 'Signature'\n\n Config = BaseConfig\n __slots__ = ('__dict__', '__fields_set__')\n # equivalent of inheriting from Representation\n __repr_name__ = Representation.__repr_name__\n __repr_str__ = Representation.__repr_str__\n __pretty__ = Representation.__pretty__\n __str__ = Representation.__str__\n __repr__ = Representation.__repr__\n\n def __init__(__pydantic_self__, **data: Any) -> None:\n \"\"\"\n Create a new model by parsing and validating input data from keyword arguments.\n\n Raises ValidationError if the input data cannot be parsed to form a valid model.\n \"\"\"\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n if TYPE_CHECKING:\n __pydantic_self__.__dict__: Dict[str, Any] = {}\n __pydantic_self__.__fields_set__: 'SetStr' = set()\n values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)\n if validation_error:\n raise validation_error\n object.__setattr__(__pydantic_self__, '__dict__', values)\n object.__setattr__(__pydantic_self__, '__fields_set__', fields_set)\n\n @no_type_check\n def __setattr__(self, name, value):\n if self.__config__.extra is not Extra.allow and name not in self.__fields__:\n raise ValueError(f'\"{self.__class__.__name__}\" object has no field \"{name}\"')\n elif not self.__config__.allow_mutation:\n raise TypeError(f'\"{self.__class__.__name__}\" is immutable and does not support item assignment')\n elif self.__config__.validate_assignment:\n known_field = self.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, self.dict(exclude={name}), loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n self.__dict__[name] = value\n self.__fields_set__.add(name)\n\n def __getstate__(self) -> 'DictAny':\n return {'__dict__': self.__dict__, '__fields_set__': self.__fields_set__}\n\n def __setstate__(self, state: 'DictAny') -> None:\n object.__setattr__(self, '__dict__', state['__dict__'])\n object.__setattr__(self, '__fields_set__', state['__fields_set__'])\n\n def dict(\n self,\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'DictStrAny':\n \"\"\"\n Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.\n\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.dict(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n\n return dict(\n self._iter(\n to_dict=True,\n by_alias=by_alias,\n include=include,\n exclude=exclude,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n )\n\n def json(\n self,\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n encoder: Optional[Callable[[Any], Any]] = None,\n **dumps_kwargs: Any,\n ) -> str:\n \"\"\"\n Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.\n\n `encoder` is an optional 
function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.json(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)\n data = self.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if self.__custom_root_type__:\n data = data[ROOT_KEY]\n return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)\n\n @classmethod\n def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':\n if cls.__custom_root_type__ and (\n not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY}) or cls.__fields__[ROOT_KEY].shape == SHAPE_MAPPING\n ):\n obj = {ROOT_KEY: obj}\n elif not isinstance(obj, dict):\n try:\n obj = dict(obj)\n except (TypeError, ValueError) as e:\n exc = TypeError(f'{cls.__name__} expected dict not {type(obj).__name__}')\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e\n return cls(**obj)\n\n @classmethod\n def parse_raw(\n cls: Type['Model'],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n try:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n except (ValueError, TypeError, UnicodeDecodeError) as e:\n raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)\n return cls.parse_obj(obj)\n\n @classmethod\n def parse_file(\n cls: Type['Model'],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n return cls.parse_obj(obj)\n\n @classmethod\n def from_orm(cls: Type['Model'], obj: Any) -> 'Model':\n if not cls.__config__.orm_mode:\n raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')\n obj = cls._decompose_class(obj)\n m = cls.__new__(cls)\n values, fields_set, validation_error = validate_model(cls, obj)\n if validation_error:\n raise validation_error\n object.__setattr__(m, '__dict__', values)\n object.__setattr__(m, '__fields_set__', fields_set)\n return m\n\n @classmethod\n def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':\n \"\"\"\n Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.\n Default values are respected, but no other validation is performed.\n \"\"\"\n m = cls.__new__(cls)\n object.__setattr__(m, '__dict__', {**deepcopy(cls.__field_defaults__), **values})\n if _fields_set is None:\n _fields_set = set(values.keys())\n object.__setattr__(m, '__fields_set__', _fields_set)\n return m\n\n def copy(\n self: 'Model',\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n update: 'DictStrAny' = None,\n deep: bool = False,\n ) -> 'Model':\n \"\"\"\n Duplicate a model, optionally choose which fields to include, exclude and change.\n\n :param include: fields to include in new model\n :param exclude: fields 
to exclude from new model, as with values this takes precedence over include\n :param update: values to change/add in the new model. Note: the data is not validated before creating\n the new model: you should trust this data\n :param deep: set to `True` to make a deep copy of the model\n :return: new model instance\n \"\"\"\n\n v = dict(\n self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False),\n **(update or {}),\n )\n\n if deep:\n v = deepcopy(v)\n\n cls = self.__class__\n m = cls.__new__(cls)\n object.__setattr__(m, '__dict__', v)\n object.__setattr__(m, '__fields_set__', self.__fields_set__.copy())\n return m\n\n @classmethod\n def schema(cls, by_alias: bool = True) -> 'DictStrAny':\n cached = cls.__schema_cache__.get(by_alias)\n if cached is not None:\n return cached\n s = model_schema(cls, by_alias=by_alias)\n cls.__schema_cache__[by_alias] = s\n return s\n\n @classmethod\n def schema_json(cls, *, by_alias: bool = True, **dumps_kwargs: Any) -> str:\n from .json import pydantic_encoder\n\n return cls.__config__.json_dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs)\n\n @classmethod\n def __get_validators__(cls) -> 'CallableGenerator':\n yield cls.validate\n\n @classmethod\n def validate(cls: Type['Model'], value: Any) -> 'Model':\n if isinstance(value, dict):\n return cls(**value)\n elif isinstance(value, cls):\n return value.copy()\n elif cls.__config__.orm_mode:\n return cls.from_orm(value)\n else:\n try:\n value_as_dict = dict(value)\n except (TypeError, ValueError) as e:\n raise DictError() from e\n return cls(**value_as_dict)\n\n @classmethod\n def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:\n return cls.__config__.getter_dict(obj)\n\n @classmethod\n @no_type_check\n def _get_value(\n cls,\n v: Any,\n to_dict: bool,\n by_alias: bool,\n include: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude_unset: bool,\n exclude_defaults: bool,\n exclude_none: bool,\n ) -> Any:\n\n if isinstance(v, BaseModel):\n if to_dict:\n return v.dict(\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=include,\n exclude=exclude,\n exclude_none=exclude_none,\n )\n else:\n return v.copy(include=include, exclude=exclude)\n\n value_exclude = ValueItems(v, exclude) if exclude else None\n value_include = ValueItems(v, include) if include else None\n\n if isinstance(v, dict):\n return {\n k_: cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(k_),\n exclude=value_exclude and value_exclude.for_element(k_),\n exclude_none=exclude_none,\n )\n for k_, v_ in v.items()\n if (not value_exclude or not value_exclude.is_excluded(k_))\n and (not value_include or value_include.is_included(k_))\n }\n\n elif sequence_like(v):\n return type(v)(\n cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(i),\n exclude=value_exclude and value_exclude.for_element(i),\n exclude_none=exclude_none,\n )\n for i, v_ in enumerate(v)\n if (not value_exclude or not value_exclude.is_excluded(i))\n and (not value_include or value_include.is_included(i))\n )\n\n else:\n return v\n\n @classmethod\n def update_forward_refs(cls, **localns: Any) -> None:\n \"\"\"\n Try to 
update ForwardRefs on fields based on this Model, globalns and localns.\n \"\"\"\n globalns = sys.modules[cls.__module__].__dict__\n globalns.setdefault(cls.__name__, cls)\n for f in cls.__fields__.values():\n update_field_forward_refs(f, globalns=globalns, localns=localns)\n\n def __iter__(self) -> 'TupleGenerator':\n \"\"\"\n so `dict(model)` works\n \"\"\"\n yield from self.__dict__.items()\n\n def _iter(\n self,\n to_dict: bool = False,\n by_alias: bool = False,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'TupleGenerator':\n\n allowed_keys = self._calculate_keys(include=include, exclude=exclude, exclude_unset=exclude_unset)\n if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none):\n # huge boost for plain _iter()\n yield from self.__dict__.items()\n return\n\n value_exclude = ValueItems(self, exclude) if exclude else None\n value_include = ValueItems(self, include) if include else None\n\n for k, v in self.__dict__.items():\n if (\n (allowed_keys is not None and k not in allowed_keys)\n or (exclude_none and v is None)\n or (exclude_defaults and self.__field_defaults__.get(k, _missing) == v)\n ):\n continue\n if by_alias and k in self.__fields__:\n k = self.__fields__[k].alias\n if to_dict or value_include or value_exclude:\n v = self._get_value(\n v,\n to_dict=to_dict,\n by_alias=by_alias,\n include=value_include and value_include.for_element(k),\n exclude=value_exclude and value_exclude.for_element(k),\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n yield k, v\n\n def _calculate_keys(\n self,\n include: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude_unset: bool,\n update: Optional['DictStrAny'] = None,\n ) -> Optional[AbstractSet[str]]:\n if include is None and exclude is None and exclude_unset is False:\n return None\n\n keys: AbstractSet[str]\n if exclude_unset:\n keys = self.__fields_set__.copy()\n else:\n keys = self.__dict__.keys()\n\n if include is not None:\n if isinstance(include, dict):\n keys &= include.keys()\n else:\n keys &= include\n\n if update:\n keys -= update.keys()\n\n if exclude:\n if isinstance(exclude, dict):\n keys -= {k for k, v in exclude.items() if v is ...}\n else:\n keys -= exclude\n\n return keys\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, BaseModel):\n return self.dict() == other.dict()\n else:\n return self.dict() == other\n\n def __repr_args__(self) -> 'ReprArgs':\n return self.__dict__.items() # type: ignore\n\n @property\n def fields(self) -> Dict[str, ModelField]:\n warnings.warn('`fields` attribute is deprecated, use `__fields__` instead', DeprecationWarning)\n return self.__fields__\n\n def to_string(self, pretty: bool = False) -> str:\n warnings.warn('`model.to_string()` method is deprecated, use `str(model)` instead', DeprecationWarning)\n return str(self)\n\n @property\n def __values__(self) -> 'DictStrAny':\n warnings.warn('`__values__` attribute is deprecated, use `__dict__` instead', DeprecationWarning)\n return self.__dict__\n\n\ndef create_model(\n model_name: str,\n *,\n __config__: Type[BaseConfig] = None,\n __base__: Type[BaseModel] = None,\n __module__: Optional[str] = None,\n __validators__: Dict[str, classmethod] = None,\n **field_definitions: 
Any,\n) -> Type[BaseModel]:\n \"\"\"\n Dynamically create a model.\n :param model_name: name of the created model\n :param __config__: config class to use for the new model\n :param __base__: base class for the new model to inherit from\n :param __validators__: a dict of method names and @validator class methods\n :param **field_definitions: fields of the model (or extra fields if a base is supplied) in the format\n `<name>=(<type>, <default default>)` or `<name>=<default value> eg. `foobar=(str, ...)` or `foobar=123`\n \"\"\"\n if __base__:\n if __config__ is not None:\n raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')\n else:\n __base__ = BaseModel\n\n fields = {}\n annotations = {}\n\n for f_name, f_def in field_definitions.items():\n if not is_valid_field(f_name):\n warnings.warn(f'fields may not start with an underscore, ignoring \"{f_name}\"', RuntimeWarning)\n if isinstance(f_def, tuple):\n try:\n f_annotation, f_value = f_def\n except ValueError as e:\n raise ConfigError(\n f'field definitions should either be a tuple of (<type>, <default>) or just a '\n f'default value, unfortunately this means tuples as '\n f'default values are not allowed'\n ) from e\n else:\n f_annotation, f_value = None, f_def\n\n if f_annotation:\n annotations[f_name] = f_annotation\n fields[f_name] = f_value\n\n namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}\n if __validators__:\n namespace.update(__validators__)\n namespace.update(fields)\n if __config__:\n namespace['Config'] = inherit_config(__config__, BaseConfig)\n\n return type(model_name, (__base__,), namespace)\n\n\n_missing = object()\n\n\ndef validate_model( # noqa: C901 (ignore complexity)\n model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None\n) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:\n \"\"\"\n validate data against a model.\n \"\"\"\n values = {}\n errors = []\n # input_data names, possibly alias\n names_used = set()\n # field names, never aliases\n fields_set = set()\n config = model.__config__\n check_extra = config.extra is not Extra.ignore\n cls_ = cls or model\n\n for validator in model.__pre_root_validators__:\n try:\n input_data = validator(cls_, input_data)\n except (ValueError, TypeError, AssertionError) as exc:\n return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)\n\n for name, field in model.__fields__.items():\n if type(field.type_) == ForwardRef:\n raise ConfigError(\n f'field \"{field.name}\" not yet prepared so type is still a ForwardRef, '\n f'you might need to call {cls_.__name__}.update_forward_refs().'\n )\n\n value = input_data.get(field.alias, _missing)\n using_name = False\n if value is _missing and config.allow_population_by_field_name and field.alt_alias:\n value = input_data.get(field.name, _missing)\n using_name = True\n\n if value is _missing:\n if field.required:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n continue\n\n value = field.get_default()\n\n if not config.validate_all and not field.validate_always:\n values[name] = value\n continue\n else:\n fields_set.add(name)\n if check_extra:\n names_used.add(field.name if using_name else field.alias)\n\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)\n if isinstance(errors_, ErrorWrapper):\n errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[name] = v_\n\n if check_extra:\n if isinstance(input_data, GetterDict):\n extra = 
input_data.extra_keys() - names_used\n else:\n extra = input_data.keys() - names_used\n if extra:\n fields_set |= extra\n if config.extra is Extra.allow:\n for f in extra:\n values[f] = input_data[f]\n else:\n for f in sorted(extra):\n errors.append(ErrorWrapper(ExtraError(), loc=f))\n\n for skip_on_failure, validator in model.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n values = validator(cls_, values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n break\n\n if errors:\n return values, fields_set, ValidationError(errors, cls_)\n else:\n return values, fields_set, None\n",
"path": "pydantic/main.py"
}
] | [
{
"content": "import json\nimport sys\nimport warnings\nfrom abc import ABCMeta\nfrom copy import deepcopy\nfrom enum import Enum\nfrom functools import partial\nfrom pathlib import Path\nfrom types import FunctionType\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union, cast, no_type_check\n\nfrom .class_validators import ROOT_KEY, ValidatorGroup, extract_root_validators, extract_validators, inherit_validators\nfrom .error_wrappers import ErrorWrapper, ValidationError\nfrom .errors import ConfigError, DictError, ExtraError, MissingError\nfrom .fields import SHAPE_MAPPING, ModelField, Undefined\nfrom .json import custom_pydantic_encoder, pydantic_encoder\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .schema import model_schema\nfrom .types import PyObject, StrBytes\nfrom .typing import AnyCallable, AnyType, ForwardRef, is_classvar, resolve_annotations, update_field_forward_refs\nfrom .utils import GetterDict, Representation, ValueItems, lenient_issubclass, sequence_like, validate_field_name\n\nif TYPE_CHECKING:\n from .class_validators import ValidatorListDict\n from .types import ModelOrDc\n from .typing import CallableGenerator, TupleGenerator, DictStrAny, DictAny, SetStr\n from .typing import AbstractSetIntStr, DictIntStrAny, ReprArgs # noqa: F401\n\n ConfigType = Type['BaseConfig']\n Model = TypeVar('Model', bound='BaseModel')\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n__all__ = 'BaseConfig', 'BaseModel', 'Extra', 'compiled', 'create_model', 'validate_model'\n\n\nclass Extra(str, Enum):\n allow = 'allow'\n ignore = 'ignore'\n forbid = 'forbid'\n\n\nclass BaseConfig:\n title = None\n anystr_strip_whitespace = False\n min_anystr_length = None\n max_anystr_length = None\n validate_all = False\n extra = Extra.ignore\n allow_mutation = True\n allow_population_by_field_name = False\n use_enum_values = False\n fields: Dict[str, Union[str, Dict[str, str]]] = {}\n validate_assignment = False\n error_msg_templates: Dict[str, str] = {}\n arbitrary_types_allowed = False\n orm_mode: bool = False\n getter_dict: Type[GetterDict] = GetterDict\n alias_generator: Optional[Callable[[str], str]] = None\n keep_untouched: Tuple[type, ...] 
= ()\n schema_extra: Union[Dict[str, Any], Callable[[Dict[str, Any]], None]] = {}\n json_loads: Callable[[str], Any] = json.loads\n json_dumps: Callable[..., str] = json.dumps\n json_encoders: Dict[AnyType, AnyCallable] = {}\n\n @classmethod\n def get_field_info(cls, name: str) -> Dict[str, Any]:\n fields_value = cls.fields.get(name)\n\n if isinstance(fields_value, str):\n field_info: Dict[str, Any] = {'alias': fields_value}\n elif isinstance(fields_value, dict):\n field_info = fields_value\n else:\n field_info = {}\n\n if 'alias' in field_info:\n field_info.setdefault('alias_priority', 2)\n\n if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:\n alias = cls.alias_generator(name)\n if not isinstance(alias, str):\n raise TypeError(f'Config.alias_generator must return str, not {type(alias)}')\n field_info.update(alias=alias, alias_priority=1)\n return field_info\n\n @classmethod\n def prepare_field(cls, field: 'ModelField') -> None:\n \"\"\"\n Optional hook to check or modify fields during model creation.\n \"\"\"\n pass\n\n\ndef inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType') -> 'ConfigType':\n if not self_config:\n base_classes = (parent_config,)\n elif self_config == parent_config:\n base_classes = (self_config,)\n else:\n base_classes = self_config, parent_config # type: ignore\n return type('Config', base_classes, {})\n\n\nEXTRA_LINK = 'https://pydantic-docs.helpmanual.io/usage/model_config/'\n\n\ndef prepare_config(config: Type[BaseConfig], cls_name: str) -> None:\n if not isinstance(config.extra, Extra):\n try:\n config.extra = Extra(config.extra)\n except ValueError:\n raise ValueError(f'\"{cls_name}\": {config.extra} is not a valid value for \"extra\"')\n\n if hasattr(config, 'allow_population_by_alias'):\n warnings.warn(\n f'{cls_name}: \"allow_population_by_alias\" is deprecated and replaced by \"allow_population_by_field_name\"',\n DeprecationWarning,\n )\n config.allow_population_by_field_name = config.allow_population_by_alias # type: ignore\n\n if hasattr(config, 'case_insensitive') and any('BaseSettings.Config' in c.__qualname__ for c in config.__mro__):\n warnings.warn(\n f'{cls_name}: \"case_insensitive\" is deprecated on BaseSettings config and replaced by '\n f'\"case_sensitive\" (default False)',\n DeprecationWarning,\n )\n config.case_sensitive = not config.case_insensitive # type: ignore\n\n\ndef is_valid_field(name: str) -> bool:\n if not name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef validate_custom_root_type(fields: Dict[str, ModelField]) -> None:\n if len(fields) > 1:\n raise ValueError('__root__ cannot be mixed with other fields')\n\n\nUNTOUCHED_TYPES = FunctionType, property, type, classmethod, staticmethod\n\n\nclass ModelMetaclass(ABCMeta):\n @no_type_check # noqa C901\n def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901\n fields: Dict[str, ModelField] = {}\n config = BaseConfig\n validators: 'ValidatorListDict' = {}\n pre_root_validators, post_root_validators = [], []\n for base in reversed(bases):\n if issubclass(base, BaseModel) and base != BaseModel:\n fields.update(deepcopy(base.__fields__))\n config = inherit_config(base.__config__, config)\n validators = inherit_validators(base.__validators__, validators)\n pre_root_validators += base.__pre_root_validators__\n post_root_validators += base.__post_root_validators__\n\n config = inherit_config(namespace.get('Config'), config)\n validators = inherit_validators(extract_validators(namespace), validators)\n vg = 
ValidatorGroup(validators)\n\n for f in fields.values():\n f.set_config(config)\n extra_validators = vg.get_validators(f.name)\n if extra_validators:\n f.class_validators.update(extra_validators)\n # re-run prepare to add extra validators\n f.populate_validators()\n\n prepare_config(config, name)\n\n class_vars = set()\n if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):\n annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))\n untouched_types = UNTOUCHED_TYPES + config.keep_untouched\n # annotation only fields need to come first in fields\n for ann_name, ann_type in annotations.items():\n if is_classvar(ann_type):\n class_vars.add(ann_name)\n elif is_valid_field(ann_name):\n validate_field_name(bases, ann_name)\n value = namespace.get(ann_name, Undefined)\n if (\n isinstance(value, untouched_types)\n and ann_type != PyObject\n and not lenient_issubclass(getattr(ann_type, '__origin__', None), Type)\n ):\n continue\n fields[ann_name] = ModelField.infer(\n name=ann_name,\n value=value,\n annotation=ann_type,\n class_validators=vg.get_validators(ann_name),\n config=config,\n )\n\n for var_name, value in namespace.items():\n if (\n var_name not in annotations\n and is_valid_field(var_name)\n and not isinstance(value, untouched_types)\n and var_name not in class_vars\n ):\n validate_field_name(bases, var_name)\n inferred = ModelField.infer(\n name=var_name,\n value=value,\n annotation=annotations.get(var_name),\n class_validators=vg.get_validators(var_name),\n config=config,\n )\n if var_name in fields and inferred.type_ != fields[var_name].type_:\n raise TypeError(\n f'The type of {name}.{var_name} differs from the new default value; '\n f'if you wish to change the type of this field, please use a type annotation'\n )\n fields[var_name] = inferred\n\n _custom_root_type = ROOT_KEY in fields\n if _custom_root_type:\n validate_custom_root_type(fields)\n vg.check_for_unused()\n if config.json_encoders:\n json_encoder = partial(custom_pydantic_encoder, config.json_encoders)\n else:\n json_encoder = pydantic_encoder\n pre_rv_new, post_rv_new = extract_root_validators(namespace)\n new_namespace = {\n '__config__': config,\n '__fields__': fields,\n '__field_defaults__': {n: f.default for n, f in fields.items() if not f.required},\n '__validators__': vg.validators,\n '__pre_root_validators__': pre_root_validators + pre_rv_new,\n '__post_root_validators__': post_root_validators + post_rv_new,\n '__schema_cache__': {},\n '__json_encoder__': staticmethod(json_encoder),\n '__custom_root_type__': _custom_root_type,\n **{n: v for n, v in namespace.items() if n not in fields},\n }\n return super().__new__(mcs, name, bases, new_namespace, **kwargs)\n\n\nclass BaseModel(metaclass=ModelMetaclass):\n if TYPE_CHECKING:\n # populated by the metaclass, defined here to help IDEs only\n __fields__: Dict[str, ModelField] = {}\n __field_defaults__: Dict[str, Any] = {}\n __validators__: Dict[str, AnyCallable] = {}\n __pre_root_validators__: List[AnyCallable]\n __post_root_validators__: List[Tuple[bool, AnyCallable]]\n __config__: Type[BaseConfig] = BaseConfig\n __root__: Any = None\n __json_encoder__: Callable[[Any], Any] = lambda x: x\n __schema_cache__: 'DictAny' = {}\n __custom_root_type__: bool = False\n\n Config = BaseConfig\n __slots__ = ('__dict__', '__fields_set__')\n # equivalent of inheriting from Representation\n __repr_name__ = Representation.__repr_name__\n __repr_str__ = Representation.__repr_str__\n 
__pretty__ = Representation.__pretty__\n __str__ = Representation.__str__\n __repr__ = Representation.__repr__\n\n def __init__(__pydantic_self__, **data: Any) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n if TYPE_CHECKING:\n __pydantic_self__.__dict__: Dict[str, Any] = {}\n __pydantic_self__.__fields_set__: 'SetStr' = set()\n values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)\n if validation_error:\n raise validation_error\n object.__setattr__(__pydantic_self__, '__dict__', values)\n object.__setattr__(__pydantic_self__, '__fields_set__', fields_set)\n\n @no_type_check\n def __setattr__(self, name, value):\n if self.__config__.extra is not Extra.allow and name not in self.__fields__:\n raise ValueError(f'\"{self.__class__.__name__}\" object has no field \"{name}\"')\n elif not self.__config__.allow_mutation:\n raise TypeError(f'\"{self.__class__.__name__}\" is immutable and does not support item assignment')\n elif self.__config__.validate_assignment:\n known_field = self.__fields__.get(name, None)\n if known_field:\n value, error_ = known_field.validate(value, self.dict(exclude={name}), loc=name, cls=self.__class__)\n if error_:\n raise ValidationError([error_], type(self))\n self.__dict__[name] = value\n self.__fields_set__.add(name)\n\n def __getstate__(self) -> 'DictAny':\n return {'__dict__': self.__dict__, '__fields_set__': self.__fields_set__}\n\n def __setstate__(self, state: 'DictAny') -> None:\n object.__setattr__(self, '__dict__', state['__dict__'])\n object.__setattr__(self, '__fields_set__', state['__fields_set__'])\n\n def dict(\n self,\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'DictStrAny':\n \"\"\"\n Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.dict(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n get_key = self._get_key_factory(by_alias)\n get_key = partial(get_key, self.__fields__)\n\n allowed_keys = self._calculate_keys(include=include, exclude=exclude, exclude_unset=exclude_unset)\n return {\n get_key(k): v\n for k, v in self._iter(\n to_dict=True,\n by_alias=by_alias,\n allowed_keys=allowed_keys,\n include=include,\n exclude=exclude,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n }\n\n def _get_key_factory(self, by_alias: bool) -> Callable[..., str]:\n if by_alias:\n return lambda fields, key: fields[key].alias if key in fields else key\n\n return lambda _, key: key\n\n def json(\n self,\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n encoder: Optional[Callable[[Any], Any]] = None,\n **dumps_kwargs: Any,\n ) -> str:\n \"\"\"\n Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.\n\n `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as 
per `json.dumps()`.\n \"\"\"\n if skip_defaults is not None:\n warnings.warn(\n f'{self.__class__.__name__}.json(): \"skip_defaults\" is deprecated and replaced by \"exclude_unset\"',\n DeprecationWarning,\n )\n exclude_unset = skip_defaults\n encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__)\n data = self.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if self.__custom_root_type__:\n data = data[ROOT_KEY]\n return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs)\n\n @classmethod\n def parse_obj(cls: Type['Model'], obj: Any) -> 'Model':\n if cls.__custom_root_type__ and (\n not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY}) or cls.__fields__[ROOT_KEY].shape == SHAPE_MAPPING\n ):\n obj = {ROOT_KEY: obj}\n elif not isinstance(obj, dict):\n try:\n obj = dict(obj)\n except (TypeError, ValueError) as e:\n exc = TypeError(f'{cls.__name__} expected dict not {type(obj).__name__}')\n raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e\n return cls(**obj)\n\n @classmethod\n def parse_raw(\n cls: Type['Model'],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n try:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n except (ValueError, TypeError, UnicodeDecodeError) as e:\n raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls)\n return cls.parse_obj(obj)\n\n @classmethod\n def parse_file(\n cls: Type['Model'],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n ) -> 'Model':\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=cls.__config__.json_loads,\n )\n return cls.parse_obj(obj)\n\n @classmethod\n def from_orm(cls: Type['Model'], obj: Any) -> 'Model':\n if not cls.__config__.orm_mode:\n raise ConfigError('You must have the config attribute orm_mode=True to use from_orm')\n obj = cls._decompose_class(obj)\n m = cls.__new__(cls)\n values, fields_set, validation_error = validate_model(cls, obj)\n if validation_error:\n raise validation_error\n object.__setattr__(m, '__dict__', values)\n object.__setattr__(m, '__fields_set__', fields_set)\n return m\n\n @classmethod\n def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model':\n \"\"\"\n Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.\n Default values are respected, but no other validation is performed.\n \"\"\"\n m = cls.__new__(cls)\n object.__setattr__(m, '__dict__', {**deepcopy(cls.__field_defaults__), **values})\n if _fields_set is None:\n _fields_set = set(values.keys())\n object.__setattr__(m, '__fields_set__', _fields_set)\n return m\n\n def copy(\n self: 'Model',\n *,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n update: 'DictStrAny' = None,\n deep: bool = False,\n ) -> 'Model':\n \"\"\"\n Duplicate a model, optionally choose which fields to include, exclude and change.\n\n :param include: fields to include in new model\n :param exclude: fields to exclude from new model, as with values this takes precedence over 
include\n :param update: values to change/add in the new model. Note: the data is not validated before creating\n the new model: you should trust this data\n :param deep: set to `True` to make a deep copy of the model\n :return: new model instance\n \"\"\"\n if include is None and exclude is None and update is None:\n # skip constructing values if no arguments are passed\n v = self.__dict__\n else:\n allowed_keys = self._calculate_keys(include=include, exclude=exclude, exclude_unset=False, update=update)\n if allowed_keys is None:\n v = {**self.__dict__, **(update or {})}\n else:\n v = {\n **dict(\n self._iter(\n to_dict=False,\n by_alias=False,\n include=include,\n exclude=exclude,\n exclude_unset=False,\n allowed_keys=allowed_keys,\n )\n ),\n **(update or {}),\n }\n\n if deep:\n v = deepcopy(v)\n\n cls = self.__class__\n m = cls.__new__(cls)\n object.__setattr__(m, '__dict__', v)\n object.__setattr__(m, '__fields_set__', self.__fields_set__.copy())\n return m\n\n @classmethod\n def schema(cls, by_alias: bool = True) -> 'DictStrAny':\n cached = cls.__schema_cache__.get(by_alias)\n if cached is not None:\n return cached\n s = model_schema(cls, by_alias=by_alias)\n cls.__schema_cache__[by_alias] = s\n return s\n\n @classmethod\n def schema_json(cls, *, by_alias: bool = True, **dumps_kwargs: Any) -> str:\n from .json import pydantic_encoder\n\n return cls.__config__.json_dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs)\n\n @classmethod\n def __get_validators__(cls) -> 'CallableGenerator':\n yield cls.validate\n\n @classmethod\n def validate(cls: Type['Model'], value: Any) -> 'Model':\n if isinstance(value, dict):\n return cls(**value)\n elif isinstance(value, cls):\n return value.copy()\n elif cls.__config__.orm_mode:\n return cls.from_orm(value)\n elif cls.__custom_root_type__:\n return cls.parse_obj(value)\n else:\n try:\n value_as_dict = dict(value)\n except (TypeError, ValueError) as e:\n raise DictError() from e\n return cls(**value_as_dict)\n\n @classmethod\n def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict:\n return cls.__config__.getter_dict(obj)\n\n @classmethod\n @no_type_check\n def _get_value(\n cls,\n v: Any,\n to_dict: bool,\n by_alias: bool,\n include: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude_unset: bool,\n exclude_defaults: bool,\n exclude_none: bool,\n ) -> Any:\n\n if isinstance(v, BaseModel):\n if to_dict:\n return v.dict(\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=include,\n exclude=exclude,\n exclude_none=exclude_none,\n )\n else:\n return v.copy(include=include, exclude=exclude)\n\n value_exclude = ValueItems(v, exclude) if exclude else None\n value_include = ValueItems(v, include) if include else None\n\n if isinstance(v, dict):\n return {\n k_: cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and value_include.for_element(k_),\n exclude=value_exclude and value_exclude.for_element(k_),\n exclude_none=exclude_none,\n )\n for k_, v_ in v.items()\n if (not value_exclude or not value_exclude.is_excluded(k_))\n and (not value_include or value_include.is_included(k_))\n }\n\n elif sequence_like(v):\n return type(v)(\n cls._get_value(\n v_,\n to_dict=to_dict,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n include=value_include and 
value_include.for_element(i),\n exclude=value_exclude and value_exclude.for_element(i),\n exclude_none=exclude_none,\n )\n for i, v_ in enumerate(v)\n if (not value_exclude or not value_exclude.is_excluded(i))\n and (not value_include or value_include.is_included(i))\n )\n\n else:\n return v\n\n @classmethod\n def update_forward_refs(cls, **localns: Any) -> None:\n \"\"\"\n Try to update ForwardRefs on fields based on this Model, globalns and localns.\n \"\"\"\n globalns = sys.modules[cls.__module__].__dict__\n globalns.setdefault(cls.__name__, cls)\n for f in cls.__fields__.values():\n update_field_forward_refs(f, globalns=globalns, localns=localns)\n\n def __iter__(self) -> 'TupleGenerator':\n \"\"\"\n so `dict(model)` works\n \"\"\"\n yield from self._iter()\n\n def _iter(\n self,\n to_dict: bool = False,\n by_alias: bool = False,\n allowed_keys: Optional['SetStr'] = None,\n include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> 'TupleGenerator':\n\n value_exclude = ValueItems(self, exclude) if exclude else None\n value_include = ValueItems(self, include) if include else None\n\n if exclude_defaults:\n if allowed_keys is None:\n allowed_keys = set(self.__fields__)\n for k, v in self.__field_defaults__.items():\n if self.__dict__[k] == v:\n allowed_keys.discard(k)\n\n for k, v in self.__dict__.items():\n if allowed_keys is None or k in allowed_keys:\n value = self._get_value(\n v,\n to_dict=to_dict,\n by_alias=by_alias,\n include=value_include and value_include.for_element(k),\n exclude=value_exclude and value_exclude.for_element(k),\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if not (exclude_none and value is None):\n yield k, value\n\n def _calculate_keys(\n self,\n include: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude: Optional[Union['AbstractSetIntStr', 'DictIntStrAny']],\n exclude_unset: bool,\n update: Optional['DictStrAny'] = None,\n ) -> Optional['SetStr']:\n if include is None and exclude is None and exclude_unset is False:\n return None\n\n if exclude_unset:\n keys = self.__fields_set__.copy()\n else:\n keys = set(self.__dict__.keys())\n\n if include is not None:\n if isinstance(include, dict):\n keys &= include.keys()\n else:\n keys &= include\n\n if update:\n keys -= update.keys()\n\n if exclude:\n if isinstance(exclude, dict):\n keys -= {k for k, v in exclude.items() if v is ...}\n else:\n keys -= exclude\n\n return keys\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, BaseModel):\n return self.dict() == other.dict()\n else:\n return self.dict() == other\n\n def __repr_args__(self) -> 'ReprArgs':\n return self.__dict__.items() # type: ignore\n\n @property\n def fields(self) -> Dict[str, ModelField]:\n warnings.warn('`fields` attribute is deprecated, use `__fields__` instead', DeprecationWarning)\n return self.__fields__\n\n def to_string(self, pretty: bool = False) -> str:\n warnings.warn('`model.to_string()` method is deprecated, use `str(model)` instead', DeprecationWarning)\n return str(self)\n\n @property\n def __values__(self) -> 'DictStrAny':\n warnings.warn('`__values__` attribute is deprecated, use `__dict__` instead', DeprecationWarning)\n return self.__dict__\n\n\ndef create_model(\n model_name: str,\n *,\n __config__: Type[BaseConfig] = None,\n __base__: Type[BaseModel] = None,\n __module__: 
Optional[str] = None,\n __validators__: Dict[str, classmethod] = None,\n **field_definitions: Any,\n) -> Type[BaseModel]:\n \"\"\"\n Dynamically create a model.\n :param model_name: name of the created model\n :param __config__: config class to use for the new model\n :param __base__: base class for the new model to inherit from\n :param __validators__: a dict of method names and @validator class methods\n :param **field_definitions: fields of the model (or extra fields if a base is supplied) in the format\n `<name>=(<type>, <default default>)` or `<name>=<default value> eg. `foobar=(str, ...)` or `foobar=123`\n \"\"\"\n if __base__:\n if __config__ is not None:\n raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')\n else:\n __base__ = BaseModel\n\n fields = {}\n annotations = {}\n\n for f_name, f_def in field_definitions.items():\n if not is_valid_field(f_name):\n warnings.warn(f'fields may not start with an underscore, ignoring \"{f_name}\"', RuntimeWarning)\n if isinstance(f_def, tuple):\n try:\n f_annotation, f_value = f_def\n except ValueError as e:\n raise ConfigError(\n f'field definitions should either be a tuple of (<type>, <default>) or just a '\n f'default value, unfortunately this means tuples as '\n f'default values are not allowed'\n ) from e\n else:\n f_annotation, f_value = None, f_def\n\n if f_annotation:\n annotations[f_name] = f_annotation\n fields[f_name] = f_value\n\n namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}\n if __validators__:\n namespace.update(__validators__)\n namespace.update(fields)\n if __config__:\n namespace['Config'] = inherit_config(__config__, BaseConfig)\n\n return type(model_name, (__base__,), namespace)\n\n\n_missing = object()\n\n\ndef validate_model( # noqa: C901 (ignore complexity)\n model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None\n) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:\n \"\"\"\n validate data against a model.\n \"\"\"\n values = {}\n errors = []\n # input_data names, possibly alias\n names_used = set()\n # field names, never aliases\n fields_set = set()\n config = model.__config__\n check_extra = config.extra is not Extra.ignore\n cls_ = cls or model\n\n for validator in model.__pre_root_validators__:\n try:\n input_data = validator(cls_, input_data)\n except (ValueError, TypeError, AssertionError) as exc:\n return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)\n\n for name, field in model.__fields__.items():\n if type(field.type_) == ForwardRef:\n raise ConfigError(\n f'field \"{field.name}\" not yet prepared so type is still a ForwardRef, '\n f'you might need to call {cls_.__name__}.update_forward_refs().'\n )\n\n value = input_data.get(field.alias, _missing)\n using_name = False\n if value is _missing and config.allow_population_by_field_name and field.alt_alias:\n value = input_data.get(field.name, _missing)\n using_name = True\n\n if value is _missing:\n if field.required:\n errors.append(ErrorWrapper(MissingError(), loc=field.alias))\n continue\n\n if field.default is None:\n # deepcopy is quite slow on None\n value = None\n else:\n value = deepcopy(field.default)\n\n if not config.validate_all and not field.validate_always:\n values[name] = value\n continue\n else:\n fields_set.add(name)\n if check_extra:\n names_used.add(field.name if using_name else field.alias)\n\n v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)\n if isinstance(errors_, ErrorWrapper):\n 
errors.append(errors_)\n elif isinstance(errors_, list):\n errors.extend(errors_)\n else:\n values[name] = v_\n\n if check_extra:\n if isinstance(input_data, GetterDict):\n extra = input_data.extra_keys() - names_used\n else:\n extra = input_data.keys() - names_used\n if extra:\n fields_set |= extra\n if config.extra is Extra.allow:\n for f in extra:\n values[f] = input_data[f]\n else:\n for f in sorted(extra):\n errors.append(ErrorWrapper(ExtraError(), loc=f))\n\n for skip_on_failure, validator in model.__post_root_validators__:\n if skip_on_failure and errors:\n continue\n try:\n values = validator(cls_, values)\n except (ValueError, TypeError, AssertionError) as exc:\n errors.append(ErrorWrapper(exc, loc=ROOT_KEY))\n break\n\n if errors:\n return values, fields_set, ValidationError(errors, cls_)\n else:\n return values, fields_set, None\n",
"path": "pydantic/main.py"
}
] | diff --git a/changes/1190-Shados.md b/changes/1190-Shados.md
new file mode 100644
index 00000000000..7cc66b1259b
--- /dev/null
+++ b/changes/1190-Shados.md
@@ -0,0 +1 @@
+Fixed parsing of nested 'custom root type' models.
diff --git a/pydantic/main.py b/pydantic/main.py
index 993f55c22f9..e007f98368a 100644
--- a/pydantic/main.py
+++ b/pydantic/main.py
@@ -546,6 +546,8 @@ def validate(cls: Type['Model'], value: Any) -> 'Model':
return value.copy()
elif cls.__config__.orm_mode:
return cls.from_orm(value)
+ elif cls.__custom_root_type__:
+ return cls.parse_obj(value)
else:
try:
value_as_dict = dict(value)
diff --git a/tests/test_parse.py b/tests/test_parse.py
index ba854aa3276..a0260fba8b8 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -1,10 +1,10 @@
import json
import pickle
-from typing import List, Union
+from typing import List, Tuple, Union
import pytest
-from pydantic import BaseModel, Field, Protocol, ValidationError
+from pydantic import BaseModel, Field, Protocol, ValidationError, parse_obj_as
class Model(BaseModel):
@@ -57,6 +57,55 @@ class MyModel(BaseModel):
assert m.__root__ == ['a']
+def test_parse_nested_root_list():
+ class NestedData(BaseModel):
+ id: str
+
+ class NestedModel(BaseModel):
+ __root__: List[NestedData]
+
+ class MyModel(BaseModel):
+ nested: NestedModel
+
+ m = MyModel.parse_obj({'nested': [{'id': 'foo'}]})
+ assert isinstance(m.nested, NestedModel)
+ assert isinstance(m.nested.__root__[0], NestedData)
+
+
+def test_parse_nested_root_tuple():
+ class NestedData(BaseModel):
+ id: str
+
+ class NestedModel(BaseModel):
+ __root__: Tuple[int, NestedData]
+
+ class MyModel(BaseModel):
+ nested: List[NestedModel]
+
+ data = [0, {'id': 'foo'}]
+ m = MyModel.parse_obj({'nested': [data]})
+ assert isinstance(m.nested[0], NestedModel)
+ assert isinstance(m.nested[0].__root__[1], NestedData)
+
+ nested = parse_obj_as(NestedModel, data)
+ assert isinstance(nested, NestedModel)
+
+
+def test_parse_nested_custom_root():
+ class NestedModel(BaseModel):
+ __root__: List[str]
+
+ class MyModel(BaseModel):
+ __root__: NestedModel
+
+ nested = ['foo', 'bar']
+ m = MyModel.parse_obj(nested)
+ assert isinstance(m, MyModel)
+ assert isinstance(m.__root__, NestedModel)
+ assert isinstance(m.__root__.__root__, List)
+ assert isinstance(m.__root__.__root__[0], str)
+
+
def test_json():
assert Model.parse_raw('{"a": 12, "b": 8}') == Model(a=12, b=8)
|
pantsbuild__pants-15341 | Use of relative PATH for docker-tool shims prevents use of credential helpers
**Describe the bug**
I'm trying to set up [tools](https://www.pantsbuild.org/docs/reference-docker#section-tools) in my repo's `docker` subsystem, to plug in the [ECR credential helper](https://github.com/awslabs/amazon-ecr-credential-helper). To do so I added the following to `pants.toml`:
```toml
[docker]
tools = ["docker-credential-ecr-login", "sh"]
```
When I run `./pants package path/to/Dockerfile`, I get the error:
```
failed to solve with frontend dockerfile.v0: failed to create LLB definition: rpc error: code = Unknown desc = error getting credentials - err: docker-credential-ecr-login resolves to executable in current directory (./.shims/bin/docker-credential-ecr-login), out: ``
```
If I run the above with `--no-process-cleanup` and `cd` into the tmpdir, I see:
1. There are shims for both tools under `.shims/bin`
2. The shims behave as expected when I use them directly
3. `__run.sh` sets `PATH=.shims/bin`
If I edit `__run.sh` to instead set `PATH=<absolute-path-to-tmpdir>/.shims/bin`, the build works.
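For reference, a minimal sketch of the kind of change that would make the shim PATH absolute inside the execution sandbox, assuming Pants substitutes the `{chroot}` placeholder with the sandbox root at runtime (this mirrors the one-line change in the diff further down; `bin_directory` here is just an illustrative name):
```python
# Sketch only: build the extra PATH entry on top of the {chroot} placeholder so
# the docker client resolves credential-helper shims via an absolute path.
import os

tools_path = ".shims"
bin_directory = "bin"  # directory the tool shims are materialized into
extra_env = {"PATH": os.path.join("{chroot}", tools_path, bin_directory)}
# e.g. PATH="{chroot}/.shims/bin", expanded to an absolute path when the process runs
```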
**Pants version**
2.11.0+git9ac327d4
**OS**
MacOS
**Additional info**
Docker Desktop v4.7.1 (77678)
Docker Engine v20.10.14
| [
{
"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.subsystems.docker_options import DockerOptions\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.core.util_rules.system_binaries import (\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n BinaryShims,\n BinaryShimsRequest,\n)\nfrom pants.engine.environment import Environment, EnvironmentRequest\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n# The base class is decorated with `frozen_after_init`.\n@dataclass\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n extra_env: Mapping[str, str]\n extra_input_digests: Mapping[str, Digest] | None\n\n def __init__(\n self,\n path: str,\n fingerprint: str | None = None,\n extra_env: Mapping[str, str] | None = None,\n extra_input_digests: Mapping[str, Digest] | None = None,\n ) -> None:\n self.extra_env = {} if extra_env is None else extra_env\n self.extra_input_digests = extra_input_digests\n super().__init__(path, fingerprint)\n\n def _get_process_environment(self, env: Mapping[str, str]) -> Mapping[str, str]:\n if not self.extra_env:\n return env\n\n res = {**self.extra_env, **env}\n\n # Merge the PATH entries, in case they are present in both `env` and `self.extra_env`.\n res[\"PATH\"] = os.pathsep.join(\n p for p in (m.get(\"PATH\") for m in (self.extra_env, env)) if p\n )\n return res\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str,\n build_args: DockerBuildArgs,\n context_root: str,\n env: Mapping[str, str],\n extra_args: tuple[str, ...] = (),\n ) -> Process:\n args = [self.path, \"build\", *extra_args]\n\n for tag in tags:\n args.extend([\"--tag\", tag])\n\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n args.extend([\"--file\", dockerfile])\n\n # Docker context root.\n args.append(context_root)\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \"\")\n ),\n env=self._get_process_environment(env),\n input_digest=digest,\n immutable_input_digests=self.extra_input_digests,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tag: str, env: Mapping[str, str] | None = None) -> Process:\n return Process(\n argv=(self.path, \"push\", tag),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tag}\",\n env=self._get_process_environment(env or {}),\n immutable_input_digests=self.extra_input_digests,\n )\n\n def run_image(\n self,\n tag: str,\n *,\n docker_run_args: tuple[str, ...] | None = None,\n image_args: tuple[str, ...] 
| None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n return Process(\n argv=(self.path, \"run\", *(docker_run_args or []), tag, *(image_args or [])),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Running docker image {tag}\",\n env=self._get_process_environment(env or {}),\n immutable_input_digests=self.extra_input_digests,\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n pass\n\n\n@rule(desc=\"Finding the `docker` binary and related tooling\", level=LogLevel.DEBUG)\nasync def find_docker(\n docker_request: DockerBinaryRequest, docker_options: DockerOptions\n) -> DockerBinary:\n env = await Get(Environment, EnvironmentRequest([\"PATH\"]))\n search_path = docker_options.executable_search_path(env)\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path_or_raise(request, rationale=\"interact with the docker daemon\")\n\n if not docker_options.tools:\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n tools = await Get(\n BinaryShims,\n BinaryShimsRequest,\n BinaryShimsRequest.for_binaries(\n *docker_options.tools,\n rationale=\"use docker\",\n output_directory=\"bin\",\n search_path=search_path,\n ),\n )\n tools_path = \".shims\"\n extra_env = {\"PATH\": os.path.join(tools_path, tools.bin_directory)}\n extra_input_digests = {tools_path: tools.digest}\n\n return DockerBinary(\n first_path.path,\n first_path.fingerprint,\n extra_env=extra_env,\n extra_input_digests=extra_input_digests,\n )\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n",
"path": "src/python/pants/backend/docker/util_rules/docker_binary.py"
}
] | [
{
"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.subsystems.docker_options import DockerOptions\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.core.util_rules.system_binaries import (\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n BinaryShims,\n BinaryShimsRequest,\n)\nfrom pants.engine.environment import Environment, EnvironmentRequest\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n# The base class is decorated with `frozen_after_init`.\n@dataclass\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n extra_env: Mapping[str, str]\n extra_input_digests: Mapping[str, Digest] | None\n\n def __init__(\n self,\n path: str,\n fingerprint: str | None = None,\n extra_env: Mapping[str, str] | None = None,\n extra_input_digests: Mapping[str, Digest] | None = None,\n ) -> None:\n self.extra_env = {} if extra_env is None else extra_env\n self.extra_input_digests = extra_input_digests\n super().__init__(path, fingerprint)\n\n def _get_process_environment(self, env: Mapping[str, str]) -> Mapping[str, str]:\n if not self.extra_env:\n return env\n\n res = {**self.extra_env, **env}\n\n # Merge the PATH entries, in case they are present in both `env` and `self.extra_env`.\n res[\"PATH\"] = os.pathsep.join(\n p for p in (m.get(\"PATH\") for m in (self.extra_env, env)) if p\n )\n return res\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str,\n build_args: DockerBuildArgs,\n context_root: str,\n env: Mapping[str, str],\n extra_args: tuple[str, ...] = (),\n ) -> Process:\n args = [self.path, \"build\", *extra_args]\n\n for tag in tags:\n args.extend([\"--tag\", tag])\n\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n args.extend([\"--file\", dockerfile])\n\n # Docker context root.\n args.append(context_root)\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \"\")\n ),\n env=self._get_process_environment(env),\n input_digest=digest,\n immutable_input_digests=self.extra_input_digests,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tag: str, env: Mapping[str, str] | None = None) -> Process:\n return Process(\n argv=(self.path, \"push\", tag),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tag}\",\n env=self._get_process_environment(env or {}),\n immutable_input_digests=self.extra_input_digests,\n )\n\n def run_image(\n self,\n tag: str,\n *,\n docker_run_args: tuple[str, ...] | None = None,\n image_args: tuple[str, ...] 
| None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n return Process(\n argv=(self.path, \"run\", *(docker_run_args or []), tag, *(image_args or [])),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Running docker image {tag}\",\n env=self._get_process_environment(env or {}),\n immutable_input_digests=self.extra_input_digests,\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n pass\n\n\n@rule(desc=\"Finding the `docker` binary and related tooling\", level=LogLevel.DEBUG)\nasync def find_docker(\n docker_request: DockerBinaryRequest, docker_options: DockerOptions\n) -> DockerBinary:\n env = await Get(Environment, EnvironmentRequest([\"PATH\"]))\n search_path = docker_options.executable_search_path(env)\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path_or_raise(request, rationale=\"interact with the docker daemon\")\n\n if not docker_options.tools:\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n tools = await Get(\n BinaryShims,\n BinaryShimsRequest,\n BinaryShimsRequest.for_binaries(\n *docker_options.tools,\n rationale=\"use docker\",\n output_directory=\"bin\",\n search_path=search_path,\n ),\n )\n tools_path = \".shims\"\n extra_env = {\"PATH\": os.path.join(\"{chroot}\", tools_path, tools.bin_directory)}\n extra_input_digests = {tools_path: tools.digest}\n\n return DockerBinary(\n first_path.path,\n first_path.fingerprint,\n extra_env=extra_env,\n extra_input_digests=extra_input_digests,\n )\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n",
"path": "src/python/pants/backend/docker/util_rules/docker_binary.py"
}
] | diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py
index 2cc8fa0adbf..aa53df01192 100644
--- a/src/python/pants/backend/docker/util_rules/docker_binary.py
+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py
@@ -150,7 +150,7 @@ async def find_docker(
),
)
tools_path = ".shims"
- extra_env = {"PATH": os.path.join(tools_path, tools.bin_directory)}
+ extra_env = {"PATH": os.path.join("{chroot}", tools_path, tools.bin_directory)}
extra_input_digests = {tools_path: tools.digest}
return DockerBinary(
|
tobymao__sqlglot-2165 | Spark raw string support, commonly used with regexes
This fails with sqlglot:
```python
import sqlglot
sql = """select regexp_replace('100-200', r'([^0-9])', '')"""
sqlglot.parse_one(sql, read="databricks")
```
**Official Documentation**
https://spark.apache.org/docs/latest/sql-ref-literals.html
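For context, a minimal sketch of how the dialect's tokenizer can be taught about `r'...'`/`R'...'` prefixes, assuming sqlglot's `Spark2` dialect exposes `Tokenizer.QUOTES` as a list of quote strings and honours a `RAW_STRINGS` setting (this mirrors the change in the diff below):
```python
# Sketch: register r/R-prefixed quotes as raw strings on the Spark tokenizer,
# so r'([^0-9])' is lexed as a plain string literal without escape handling.
from sqlglot.dialects.spark2 import Spark2


class Spark(Spark2):
    class Tokenizer(Spark2.Tokenizer):
        RAW_STRINGS = [
            (prefix + q, q)  # e.g. ("r'", "'") and ('R"', '"')
            for q in Spark2.Tokenizer.QUOTES
            for prefix in ("r", "R")
        ]
```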
| [
{
"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.spark2 import Spark2\nfrom sqlglot.helper import seq_get\n\n\ndef _parse_datediff(args: t.List) -> exp.Expression:\n \"\"\"\n Although Spark docs don't mention the \"unit\" argument, Spark3 added support for\n it at some point. Databricks also supports this variant (see below).\n\n For example, in spark-sql (v3.3.1):\n - SELECT DATEDIFF('2020-01-01', '2020-01-05') results in -4\n - SELECT DATEDIFF(day, '2020-01-01', '2020-01-05') results in 4\n\n See also:\n - https://docs.databricks.com/sql/language-manual/functions/datediff3.html\n - https://docs.databricks.com/sql/language-manual/functions/datediff.html\n \"\"\"\n unit = None\n this = seq_get(args, 0)\n expression = seq_get(args, 1)\n\n if len(args) == 3:\n unit = this\n this = args[2]\n\n return exp.DateDiff(\n this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit\n )\n\n\nclass Spark(Spark2):\n class Parser(Spark2.Parser):\n FUNCTIONS = {\n **Spark2.Parser.FUNCTIONS,\n \"ANY_VALUE\": lambda args: exp.AnyValue(\n this=seq_get(args, 0), ignore_nulls=seq_get(args, 1)\n ),\n \"DATEDIFF\": _parse_datediff,\n }\n\n FUNCTION_PARSERS = Spark2.Parser.FUNCTION_PARSERS.copy()\n FUNCTION_PARSERS.pop(\"ANY_VALUE\")\n\n class Generator(Spark2.Generator):\n TYPE_MAPPING = {\n **Spark2.Generator.TYPE_MAPPING,\n exp.DataType.Type.MONEY: \"DECIMAL(15, 4)\",\n exp.DataType.Type.SMALLMONEY: \"DECIMAL(6, 4)\",\n exp.DataType.Type.UNIQUEIDENTIFIER: \"STRING\",\n }\n\n TRANSFORMS = {\n **Spark2.Generator.TRANSFORMS,\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.TimestampAdd: lambda self, e: self.func(\n \"DATEADD\", e.args.get(\"unit\") or \"DAY\", e.expression, e.this\n ),\n }\n TRANSFORMS.pop(exp.AnyValue)\n TRANSFORMS.pop(exp.DateDiff)\n TRANSFORMS.pop(exp.Group)\n\n def anyvalue_sql(self, expression: exp.AnyValue) -> str:\n return self.function_fallback_sql(expression)\n\n def datediff_sql(self, expression: exp.DateDiff) -> str:\n unit = self.sql(expression, \"unit\")\n end = self.sql(expression, \"this\")\n start = self.sql(expression, \"expression\")\n\n if unit:\n return self.func(\"DATEDIFF\", unit, start, end)\n\n return self.func(\"DATEDIFF\", end, start)\n",
"path": "sqlglot/dialects/spark.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import rename_func\nfrom sqlglot.dialects.spark2 import Spark2\nfrom sqlglot.helper import seq_get\n\n\ndef _parse_datediff(args: t.List) -> exp.Expression:\n \"\"\"\n Although Spark docs don't mention the \"unit\" argument, Spark3 added support for\n it at some point. Databricks also supports this variant (see below).\n\n For example, in spark-sql (v3.3.1):\n - SELECT DATEDIFF('2020-01-01', '2020-01-05') results in -4\n - SELECT DATEDIFF(day, '2020-01-01', '2020-01-05') results in 4\n\n See also:\n - https://docs.databricks.com/sql/language-manual/functions/datediff3.html\n - https://docs.databricks.com/sql/language-manual/functions/datediff.html\n \"\"\"\n unit = None\n this = seq_get(args, 0)\n expression = seq_get(args, 1)\n\n if len(args) == 3:\n unit = this\n this = args[2]\n\n return exp.DateDiff(\n this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit\n )\n\n\nclass Spark(Spark2):\n class Tokenizer(Spark2.Tokenizer):\n RAW_STRINGS = [\n (prefix + q, q)\n for q in t.cast(t.List[str], Spark2.Tokenizer.QUOTES)\n for prefix in (\"r\", \"R\")\n ]\n\n class Parser(Spark2.Parser):\n FUNCTIONS = {\n **Spark2.Parser.FUNCTIONS,\n \"ANY_VALUE\": lambda args: exp.AnyValue(\n this=seq_get(args, 0), ignore_nulls=seq_get(args, 1)\n ),\n \"DATEDIFF\": _parse_datediff,\n }\n\n FUNCTION_PARSERS = Spark2.Parser.FUNCTION_PARSERS.copy()\n FUNCTION_PARSERS.pop(\"ANY_VALUE\")\n\n class Generator(Spark2.Generator):\n TYPE_MAPPING = {\n **Spark2.Generator.TYPE_MAPPING,\n exp.DataType.Type.MONEY: \"DECIMAL(15, 4)\",\n exp.DataType.Type.SMALLMONEY: \"DECIMAL(6, 4)\",\n exp.DataType.Type.UNIQUEIDENTIFIER: \"STRING\",\n }\n\n TRANSFORMS = {\n **Spark2.Generator.TRANSFORMS,\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.TimestampAdd: lambda self, e: self.func(\n \"DATEADD\", e.args.get(\"unit\") or \"DAY\", e.expression, e.this\n ),\n }\n TRANSFORMS.pop(exp.AnyValue)\n TRANSFORMS.pop(exp.DateDiff)\n TRANSFORMS.pop(exp.Group)\n\n def anyvalue_sql(self, expression: exp.AnyValue) -> str:\n return self.function_fallback_sql(expression)\n\n def datediff_sql(self, expression: exp.DateDiff) -> str:\n unit = self.sql(expression, \"unit\")\n end = self.sql(expression, \"this\")\n start = self.sql(expression, \"expression\")\n\n if unit:\n return self.func(\"DATEDIFF\", unit, start, end)\n\n return self.func(\"DATEDIFF\", end, start)\n",
"path": "sqlglot/dialects/spark.py"
}
] | diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py
index a4435f6692..9d4a1abeb2 100644
--- a/sqlglot/dialects/spark.py
+++ b/sqlglot/dialects/spark.py
@@ -35,6 +35,13 @@ def _parse_datediff(args: t.List) -> exp.Expression:
class Spark(Spark2):
+ class Tokenizer(Spark2.Tokenizer):
+ RAW_STRINGS = [
+ (prefix + q, q)
+ for q in t.cast(t.List[str], Spark2.Tokenizer.QUOTES)
+ for prefix in ("r", "R")
+ ]
+
class Parser(Spark2.Parser):
FUNCTIONS = {
**Spark2.Parser.FUNCTIONS,
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index a892b0f110..becb66a18c 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -239,6 +239,14 @@ def test_spark(self):
self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("SPLIT(str, pattern, lim)")
+ self.validate_identity(
+ "SELECT REGEXP_REPLACE('100-200', r'([^0-9])', '')",
+ "SELECT REGEXP_REPLACE('100-200', '([^0-9])', '')",
+ )
+ self.validate_identity(
+ "SELECT REGEXP_REPLACE('100-200', R'([^0-9])', '')",
+ "SELECT REGEXP_REPLACE('100-200', '([^0-9])', '')",
+ )
self.validate_identity(
"SELECT STR_TO_MAP('a:1,b:2,c:3')",
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
|
Gallopsled__pwntools-2051 | Double decoding in util.packing._need_text
I noticed that if you send a bytestring to e.g. `log.info()`, you get an AttributeError:
`AttributeError: 'str' object has no attribute 'decode'`
This is because of the following line:
https://github.com/Gallopsled/pwntools/blob/ef698d4562024802be5cc3e2fa49333c70a96662/pwnlib/util/packing.py#L1051
If the parameter passed to `_need_text` is a bytestring and `context.encoding` is `auto`, it first tries to decode the string, as seen in https://github.com/Gallopsled/pwntools/blob/ef698d4562024802be5cc3e2fa49333c70a96662/pwnlib/util/packing.py#L1043
If that decode succeeds, the final return statement then tries to decode the value a second time, which raises the exception above.
When the encoding is `auto`, the function iterates over three candidate encodings and attempts to decode with each, so the second decode in the return statement reuses whichever encoding was tried last. Even if every encoding raises a UnicodeDecodeError, the return statement still attempts another decode with that last encoding.
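A minimal reproduction of the symptom, assuming a standard pwntools setup where `from pwn import *` exposes the global `log` object and `context.encoding` is left at its default of `auto`:
```python
# Sketch of the reported failure: passing bytes to a logging call reaches
# _need_text(), which decodes the value once and whose final return statement
# then tries to decode the already-decoded str a second time.
from pwn import *

log.info(b'some bytes')  # AttributeError: 'str' object has no attribute 'decode'
```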
| [
{
"content": " # -*- coding: utf-8 -*-\nr\"\"\"\nModule for packing and unpacking integers.\n\nSimplifies access to the standard ``struct.pack`` and ``struct.unpack``\nfunctions, and also adds support for packing/unpacking arbitrary-width\nintegers.\n\nThe packers are all context-aware for ``endian`` and ``signed`` arguments,\nthough they can be overridden in the parameters.\n\nExamples:\n\n >>> p8(0)\n b'\\x00'\n >>> p32(0xdeadbeef)\n b'\\xef\\xbe\\xad\\xde'\n >>> p32(0xdeadbeef, endian='big')\n b'\\xde\\xad\\xbe\\xef'\n >>> with context.local(endian='big'): p32(0xdeadbeef)\n b'\\xde\\xad\\xbe\\xef'\n\n Make a frozen packer, which does not change with context.\n\n >>> p=make_packer('all')\n >>> p(0xff)\n b'\\xff'\n >>> p(0x1ff)\n b'\\xff\\x01'\n >>> with context.local(endian='big'): print(repr(p(0x1ff)))\n b'\\xff\\x01'\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport collections\nimport six\nimport struct\nimport sys\nimport warnings\n\nfrom six.moves import range\n\nfrom pwnlib.context import LocalNoarchContext\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\n\nfrom pwnlib.util import iters\n\nmod = sys.modules[__name__]\nlog = getLogger(__name__)\n\ndef pack(number, word_size = None, endianness = None, sign = None, **kwargs):\n r\"\"\"pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str\n\n Packs arbitrary-sized integer.\n\n Word-size, endianness and signedness is done according to context.\n\n `word_size` can be any positive number or the string \"all\". Choosing the\n string \"all\" will output a string long enough to contain all the significant\n bits and thus be decodable by :func:`unpack`.\n\n `word_size` can be any positive number. The output will contain word_size/8\n rounded up number of bytes. 
If word_size is not a multiple of 8, it will be\n padded with zeroes up to a byte boundary.\n\n Arguments:\n number (int): Number to convert\n word_size (int): Word size of the converted integer or the string 'all' (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The packed number as a string.\n\n Examples:\n >>> pack(0x414243, 24, 'big', True)\n b'ABC'\n >>> pack(0x414243, 24, 'little', True)\n b'CBA'\n >>> pack(0x814243, 24, 'big', False)\n b'\\x81BC'\n >>> pack(0x814243, 24, 'big', True)\n Traceback (most recent call last):\n ...\n ValueError: pack(): number does not fit within word_size\n >>> pack(0x814243, 25, 'big', True)\n b'\\x00\\x81BC'\n >>> pack(-1, 'all', 'little', True)\n b'\\xff'\n >>> pack(-256, 'all', 'big', True)\n b'\\xff\\x00'\n >>> pack(0x0102030405, 'all', 'little', True)\n b'\\x05\\x04\\x03\\x02\\x01'\n >>> pack(-1)\n b'\\xff\\xff\\xff\\xff'\n >>> pack(0x80000000, 'all', 'big', True)\n b'\\x00\\x80\\x00\\x00\\x00'\n\"\"\"\n if sign is None and number < 0:\n sign = True\n\n if word_size != 'all':\n kwargs.setdefault('word_size', word_size)\n\n kwargs.setdefault('endianness', endianness)\n kwargs.setdefault('sign', sign)\n\n with context.local(**kwargs):\n # Lookup in context if not found\n word_size = 'all' if word_size == 'all' else context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if not isinstance(number, six.integer_types):\n raise ValueError(\"pack(): number must be of type (int,long) (got %r)\" % type(number))\n\n if not isinstance(sign, bool):\n raise ValueError(\"pack(): sign must be either True or False (got %r)\" % sign)\n\n if endianness not in ['little', 'big']:\n raise ValueError(\"pack(): endianness must be either 'little' or 'big' (got %r)\" % endianness)\n\n # Verify that word_size make sense\n if word_size == 'all':\n if number == 0:\n word_size = 8\n elif number > 0:\n if sign:\n word_size = (number.bit_length() | 7) + 1\n else:\n word_size = ((number.bit_length() - 1) | 7) + 1\n else:\n if not sign:\n raise ValueError(\"pack(): number does not fit within word_size\")\n word_size = ((number + 1).bit_length() | 7) + 1\n elif not isinstance(word_size, six.integer_types) or word_size <= 0:\n raise ValueError(\"pack(): word_size must be a positive integer or the string 'all'\")\n\n if sign:\n limit = 1 << (word_size-1)\n if not -limit <= number < limit:\n raise ValueError(\"pack(): number does not fit within word_size\")\n else:\n limit = 1 << word_size\n if not 0 <= number < limit:\n raise ValueError(\"pack(): number does not fit within word_size [%i, %r, %r]\" % (0, number, limit))\n\n # Normalize number and size now that we have verified them\n # From now on we can treat positive and negative numbers the same\n number = number & ((1 << word_size) - 1)\n byte_size = (word_size + 7) // 8\n\n out = []\n\n for _ in range(byte_size):\n out.append(_p8lu(number & 0xff))\n number = number >> 8\n\n if endianness == 'little':\n return b''.join(out)\n else:\n return b''.join(reversed(out))\n\n@LocalNoarchContext\ndef unpack(data, word_size = None):\n r\"\"\"unpack(data, word_size = None, endianness = None, sign = None, **kwargs) -> int\n\n Packs arbitrary-sized integer.\n\n Word-size, endianness and signedness is done according to context.\n\n `word_size` can be any positive number or the string \"all\". 
Choosing the\n string \"all\" is equivalent to ``len(data)*8``.\n\n If `word_size` is not a multiple of 8, then the bits used for padding\n are discarded.\n\n Arguments:\n number (int): String to convert\n word_size (int): Word size of the converted integer or the string \"all\" (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The unpacked number.\n\n Examples:\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='little', sign=False))\n '0x55aa'\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=False))\n '0xaa55'\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=True))\n '-0x55ab'\n >>> hex(unpack(b'\\xaa\\x55', 15, endian='big', sign=True))\n '0x2a55'\n >>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='little', sign=True))\n '0x302ff'\n >>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='big', sign=True))\n '-0xfdfd'\n \"\"\"\n\n # Lookup in context if not found\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n data = _need_bytes(data, 2)\n\n # Verify that word_size make sense\n if word_size == 'all':\n word_size = len(data) * 8\n elif not isinstance(word_size, six.integer_types) or word_size <= 0:\n raise ValueError(\"unpack(): word_size must be a positive integer or the string 'all'\")\n\n byte_size = (word_size + 7) // 8\n\n if byte_size != len(data):\n raise ValueError(\"unpack(): data must have length %d, since word_size was %d\" % (byte_size, word_size))\n\n number = 0\n\n if endianness == \"little\":\n data = reversed(data)\n data = bytearray(data)\n\n for c in data:\n number = (number << 8) + c\n\n number = number & ((1 << word_size) - 1)\n\n if not sign:\n return int(number)\n\n signbit = number & (1 << (word_size-1))\n return int(number - 2*signbit)\n\n@LocalNoarchContext\ndef unpack_many(data, word_size = None):\n \"\"\"unpack(data, word_size = None, endianness = None, sign = None) -> int list\n\n Splits `data` into groups of ``word_size//8`` bytes and calls :func:`unpack` on each group. Returns a list of the results.\n\n `word_size` must be a multiple of `8` or the string \"all\". 
In the latter case a singleton list will always be returned.\n\n Args\n number (int): String to convert\n word_size (int): Word size of the converted integers or the string \"all\" (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The unpacked numbers.\n\n Examples:\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='little', sign=False)))\n ['0x55aa', '0x33cc']\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='big', sign=False)))\n ['0xaa55', '0xcc33']\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='big', sign=True)))\n ['-0x55ab', '-0x33cd']\n >>> list(map(hex, unpack_many(b'\\\\xff\\\\x02\\\\x03', 'all', endian='little', sign=True)))\n ['0x302ff']\n >>> list(map(hex, unpack_many(b'\\\\xff\\\\x02\\\\x03', 'all', endian='big', sign=True)))\n ['-0xfdfd']\n \"\"\"\n # Lookup in context if None\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if word_size == 'all':\n return [unpack(data, word_size)]\n\n # Currently we only group on byte boundaries\n if word_size % 8 != 0:\n raise ValueError(\"unpack_many(): word_size must be a multiple of 8\")\n\n out = []\n n = word_size // 8\n for i in range(0, len(data), n):\n out.append(unpack(data[i:i+n], word_size))\n\n return list(map(int, out))\n\n\n\n#\n# Make individual packers, e.g. _p8lu\n#\nops = ['p','u']\nsizes = {8:'b', 16:'h', 32:'i', 64:'q'}\nends = ['b','l']\nsigns = ['s','u']\n\nreturn_types = {'p': 'bytes', 'u': 'int'}\nop_verbs = {'p': 'pack', 'u': 'unpack'}\narg_doc = {'p': 'number (int): Number to convert',\n 'u': 'data (bytes): Byte string to convert'}\nrv_doc = {'p': 'The packed number as a byte string',\n 'u': 'The unpacked number'}\n\n\ndef make_single(op,size,end,sign):\n name = '_%s%s%s%s' % (op, size, end, sign)\n fmt = sizes[size]\n end = '>' if end == 'b' else '<'\n\n if sign == 'u':\n fmt = fmt.upper()\n fmt = end+fmt\n\n struct_op = getattr(struct.Struct(fmt), op_verbs[op])\n if op == 'u':\n def routine(data, stacklevel=1):\n data = _need_bytes(data, stacklevel)\n return struct_op(data)[0]\n else:\n def routine(data, stacklevel=None):\n return struct_op(data)\n routine.__name__ = routine.__qualname__ = name\n\n return name, routine\n\n\nfor op,size,end,sign in iters.product(ops, sizes, ends, signs):\n name, routine = make_single(op,size,end,sign)\n setattr(mod, name, routine)\n\n\n#\n# Make normal user-oriented packers, e.g. p8\n#\ndef make_multi(op, size):\n\n name = \"%s%s\" % (op,size)\n\n ls = getattr(mod, \"_%sls\" % (name))\n lu = getattr(mod, \"_%slu\" % (name))\n bs = getattr(mod, \"_%sbs\" % (name))\n bu = getattr(mod, \"_%sbu\" % (name))\n\n @LocalNoarchContext\n def routine(number):\n endian = context.endian\n signed = context.signed\n return {(\"little\", True ): ls,\n (\"little\", False): lu,\n (\"big\", True ): bs,\n (\"big\", False): bu}[endian, signed](number, 3)\n\n routine.__name__ = name\n routine.__doc__ = \"\"\"%s%s(number, sign, endian, ...) 
-> %s\n\n %ss an %s-bit integer\n\n Arguments:\n %s\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (\"unsigned\"/\"signed\")\n kwargs (dict): Arguments passed to context.local(), such as\n ``endian`` or ``signed``.\n\n Returns:\n %s\n \"\"\" % (op, size, return_types[op], op_verbs[op].title(), size, arg_doc[op], rv_doc[op])\n\n return name, routine\n\n\nfor op,size in iters.product(ops, sizes):\n name, routine = make_multi(op,size)\n setattr(mod, name, routine)\n\ndef make_packer(word_size = None, sign = None, **kwargs):\n \"\"\"make_packer(word_size = None, endianness = None, sign = None) -> number → str\n\n Creates a packer by \"freezing\" the given arguments.\n\n Semantically calling ``make_packer(w, e, s)(data)`` is equivalent to calling\n ``pack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however\n faster to call this function, since it will then use a specialized version.\n\n Arguments:\n word_size (int): The word size to be baked into the returned packer or the string all (in bits).\n endianness (str): The endianness to be baked into the returned packer. (\"little\"/\"big\")\n sign (str): The signness to be baked into the returned packer. (\"unsigned\"/\"signed\")\n kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)\n\n Returns:\n A function, which takes a single argument in the form of a number and returns a string\n of that number in a packed form.\n\n Examples:\n >>> p = make_packer(32, endian='little', sign='unsigned')\n >>> p\n <function _p32lu at 0x...>\n >>> p(42)\n b'*\\\\x00\\\\x00\\\\x00'\n >>> p(-1)\n Traceback (most recent call last):\n ...\n error: integer out of range for 'I' format code\n >>> make_packer(33, endian='little', sign='unsigned')\n <function ...<lambda> at 0x...>\n\"\"\"\n with context.local(sign=sign, **kwargs):\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = sign if sign is None else context.sign\n\n if word_size in [8, 16, 32, 64]:\n packer = {\n (8, 0, 0): _p8lu,\n (8, 0, 1): _p8ls,\n (8, 1, 0): _p8bu,\n (8, 1, 1): _p8bs,\n (16, 0, 0): _p16lu,\n (16, 0, 1): _p16ls,\n (16, 1, 0): _p16bu,\n (16, 1, 1): _p16bs,\n (32, 0, 0): _p32lu,\n (32, 0, 1): _p32ls,\n (32, 1, 0): _p32bu,\n (32, 1, 1): _p32bs,\n (64, 0, 0): _p64lu,\n (64, 0, 1): _p64ls,\n (64, 1, 0): _p64bu,\n (64, 1, 1): _p64bs,\n }.get((word_size, {'big': 1, 'little': 0}[endianness], sign))\n\n if packer:\n return packer\n\n return lambda number: pack(number, word_size, endianness, sign)\n\n@LocalNoarchContext\ndef make_unpacker(word_size = None, endianness = None, sign = None, **kwargs):\n \"\"\"make_unpacker(word_size = None, endianness = None, sign = None, **kwargs) -> str → number\n\n Creates a unpacker by \"freezing\" the given arguments.\n\n Semantically calling ``make_unpacker(w, e, s)(data)`` is equivalent to calling\n ``unpack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however\n faster to call this function, since it will then use a specialized version.\n\n Arguments:\n word_size (int): The word size to be baked into the returned packer (in bits).\n endianness (str): The endianness to be baked into the returned packer. (\"little\"/\"big\")\n sign (str): The signness to be baked into the returned packer. (\"unsigned\"/\"signed\")\n kwargs: Additional context flags, for setting by alias (e.g. 
``endian=`` rather than index)\n\n Returns:\n A function, which takes a single argument in the form of a string and returns a number\n of that string in an unpacked form.\n\n Examples:\n >>> u = make_unpacker(32, endian='little', sign='unsigned')\n >>> u\n <function _u32lu at 0x...>\n >>> hex(u(b'/bin'))\n '0x6e69622f'\n >>> u(b'abcde')\n Traceback (most recent call last):\n ...\n error: unpack requires a string argument of length 4\n >>> make_unpacker(33, endian='little', sign='unsigned')\n <function ...<lambda> at 0x...>\n\"\"\"\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if word_size in [8, 16, 32, 64]:\n endianness = 1 if endianness == 'big' else 0\n\n return {\n (8, 0, 0): _u8lu,\n (8, 0, 1): _u8ls,\n (8, 1, 0): _u8bu,\n (8, 1, 1): _u8bs,\n (16, 0, 0): _u16lu,\n (16, 0, 1): _u16ls,\n (16, 1, 0): _u16bu,\n (16, 1, 1): _u16bs,\n (32, 0, 0): _u32lu,\n (32, 0, 1): _u32ls,\n (32, 1, 0): _u32bu,\n (32, 1, 1): _u32bs,\n (64, 0, 0): _u64lu,\n (64, 0, 1): _u64ls,\n (64, 1, 0): _u64bu,\n (64, 1, 1): _u64bs,\n }[word_size, endianness, sign]\n else:\n return lambda number: unpack(number, word_size, endianness, sign)\n\ndef _fit(pieces, preprocessor, packer, filler, stacklevel=1):\n\n # Pulls bytes from `filler` and adds them to `pad` until it ends in `key`.\n # Returns the index of `key` in `pad`.\n pad = bytearray()\n def fill(key):\n key = bytearray(key)\n offset = pad.find(key)\n while offset == -1:\n pad.append(next(filler))\n offset = pad.find(key, -len(key))\n return offset\n\n # Key conversion:\n # - convert str/unicode keys to offsets\n # - convert large int (no null-bytes in a machine word) keys to offsets\n pieces_ = dict()\n large_key = 2**(context.word_size-8)\n for k, v in pieces.items():\n if isinstance(k, six.integer_types):\n if k >= large_key:\n k = fill(pack(k))\n elif isinstance(k, (six.text_type, bytearray, bytes)):\n k = fill(_need_bytes(k, stacklevel, 0x80))\n else:\n raise TypeError(\"flat(): offset must be of type int or str, but got '%s'\" % type(k))\n if k in pieces_:\n raise ValueError(\"flag(): multiple values at offset %d\" % k)\n pieces_[k] = v\n pieces = pieces_\n\n # We must \"roll back\" `filler` so each recursive call to `_flat` gets it in\n # the right position\n filler = iters.chain(pad, filler)\n\n # Build output\n out = b''\n\n # Negative indices need to be removed and then re-submitted\n negative = {k:v for k,v in pieces.items() if isinstance(k, int) and k<0}\n\n for k in negative:\n del pieces[k]\n\n # Positive output\n for k, v in sorted(pieces.items()):\n if k < len(out):\n raise ValueError(\"flat(): data at offset %d overlaps with previous data which ends at offset %d\" % (k, len(out)))\n\n # Fill up to offset\n while len(out) < k:\n out += p8(next(filler))\n\n # Recursively flatten data\n out += _flat([v], preprocessor, packer, filler, stacklevel + 1)\n\n # Now do negative indices\n out_negative = b''\n if negative:\n most_negative = min(negative.keys())\n for k, v in sorted(negative.items()):\n k += -most_negative\n\n if k < len(out_negative):\n raise ValueError(\"flat(): data at offset %d overlaps with previous data which ends at offset %d\" % (k, len(out)))\n\n # Fill up to offset\n while len(out_negative) < k:\n out_negative += p8(next(filler))\n\n # Recursively flatten data\n out_negative += _flat([v], preprocessor, packer, filler, stacklevel + 1)\n\n return filler, out_negative + out\n\ndef _flat(args, preprocessor, packer, filler, stacklevel=1):\n out = []\n for arg in args:\n\n if not 
isinstance(arg, (list, tuple, dict)):\n arg_ = preprocessor(arg)\n if arg_ is not None:\n arg = arg_\n\n if hasattr(arg, '__flat__'):\n val = arg.__flat__()\n elif isinstance(arg, (list, tuple)):\n val = _flat(arg, preprocessor, packer, filler, stacklevel + 1)\n elif isinstance(arg, dict):\n filler, val = _fit(arg, preprocessor, packer, filler, stacklevel + 1)\n elif isinstance(arg, bytes):\n val = arg\n elif isinstance(arg, six.text_type):\n val = _need_bytes(arg, stacklevel + 1)\n elif isinstance(arg, six.integer_types):\n val = packer(arg)\n elif isinstance(arg, bytearray):\n val = bytes(arg)\n else:\n raise ValueError(\"flat(): Flat does not support values of type %s\" % type(arg))\n\n out.append(val)\n\n # Advance `filler` for \"non-recursive\" values\n if not isinstance(arg, (list, tuple, dict)):\n for _ in range(len(val)):\n next(filler)\n\n return b''.join(out)\n\n@LocalNoarchContext\ndef flat(*args, **kwargs):\n r\"\"\"flat(\\*args, preprocessor = None, length = None, filler = de_bruijn(),\n word_size = None, endianness = None, sign = None) -> str\n\n Flattens the arguments into a string.\n\n This function takes an arbitrary number of arbitrarily nested lists, tuples\n and dictionaries. It will then find every string and number inside those\n and flatten them out. Strings are inserted directly while numbers are\n packed using the :func:`pack` function. Unicode strings are UTF-8 encoded.\n\n Dictionary keys give offsets at which to place the corresponding values\n (which are recursively flattened). Offsets are relative to where the\n flattened dictionary occurs in the output (i.e. ``{0: 'foo'}`` is equivalent\n to ``'foo'``). Offsets can be integers, unicode strings or regular strings.\n Integer offsets >= ``2**(word_size-8)`` are converted to a string using\n :func:`pack`. Unicode strings are UTF-8 encoded. After these conversions\n offsets are either integers or strings. In the latter case, the offset will\n be the lowest index at which the string occurs in `filler`. See examples\n below.\n\n Space between pieces of data is filled out using the iterable `filler`. The\n `n`'th byte in the output will be byte at index ``n % len(iterable)`` byte\n in `filler` if it has finite length or the byte at index `n` otherwise.\n\n If `length` is given, the output will be padded with bytes from `filler` to\n be this size. If the output is longer than `length`, a :py:exc:`ValueError`\n exception is raised.\n\n The three kwargs `word_size`, `endianness` and `sign` will default to using\n values in :mod:`pwnlib.context` if not specified as an argument.\n\n Arguments:\n args: Values to flatten\n preprocessor (function): Gets called on every element to optionally\n transform the element before flattening. 
If :const:`None` is\n returned, then the original value is used.\n length: The length of the output.\n filler: Iterable to use for padding.\n word_size (int): Word size of the converted integer.\n endianness (str): Endianness of the converted integer (\"little\"/\"big\").\n sign (str): Signedness of the converted integer (False/True)\n\n Examples:\n\n (Test setup, please ignore)\n \n >>> context.clear()\n\n Basic usage of :meth:`flat` works similar to the pack() routines.\n\n >>> flat(4)\n b'\\x04\\x00\\x00\\x00'\n\n :meth:`flat` works with strings, bytes, lists, and dictionaries.\n\n >>> flat(b'X')\n b'X'\n >>> flat([1,2,3])\n b'\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'\n >>> flat({4:b'X'})\n b'aaaaX'\n\n :meth:`.flat` flattens all of the values provided, and allows nested lists\n and dictionaries.\n\n >>> flat([{4:b'X'}] * 2)\n b'aaaaXaaacX'\n >>> flat([[[[[[[[[1]]]], 2]]]]])\n b'\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00'\n\n You can also provide additional arguments like endianness, word-size, and\n whether the values are treated as signed or not.\n\n >>> flat(1, b\"test\", [[[b\"AB\"]*2]*3], endianness = 'little', word_size = 16, sign = False)\n b'\\x01\\x00testABABABABABAB'\n\n A preprocessor function can be provided in order to modify the values in-flight.\n This example converts increments each value by 1, then converts to a byte string.\n\n >>> flat([1, [2, 3]], preprocessor = lambda x: str(x+1).encode())\n b'234'\n\n Using dictionaries is a fast way to get specific values at specific offsets,\n without having to do ``data += \"foo\"`` repeatedly.\n\n >>> flat({12: 0x41414141,\n ... 24: b'Hello',\n ... })\n b'aaaabaaacaaaAAAAeaaafaaaHello'\n\n Dictionary usage permits directly using values derived from :func:`.cyclic`.\n See :func:`.cyclic`, :function:`pwnlib.context.context.cyclic_alphabet`, and :data:`.context.cyclic_size`\n for more options. \n\n The cyclic pattern can be provided as either the text or hexadecimal offset.\n\n >>> flat({ 0x61616162: b'X'})\n b'aaaaX'\n >>> flat({'baaa': b'X'})\n b'aaaaX'\n\n Fields do not have to be in linear order, and can be freely mixed.\n This also works with cyclic offsets.\n\n >>> flat({2: b'A', 0:b'B'})\n b'BaA'\n >>> flat({0x61616161: b'x', 0x61616162: b'y'})\n b'xaaay'\n >>> flat({0x61616162: b'y', 0x61616161: b'x'})\n b'xaaay'\n\n Fields do not have to be in order, and can be freely mixed.\n\n >>> flat({'caaa': b'XXXX', 16: b'\\x41', 20: 0xdeadbeef})\n b'aaaabaaaXXXXdaaaAaaa\\xef\\xbe\\xad\\xde'\n >>> flat({ 8: [0x41414141, 0x42424242], 20: b'CCCC'})\n b'aaaabaaaAAAABBBBeaaaCCCC'\n >>> fit({\n ... 0x61616161: b'a',\n ... 1: b'b',\n ... 0x61616161+2: b'c',\n ... 3: b'd',\n ... 
})\n b'abadbaaac'\n\n By default, gaps in the data are filled in with the :meth:`.cyclic` pattern.\n You can customize this by providing an iterable or method for the ``filler``\n argument.\n\n >>> flat({12: b'XXXX'}, filler = b'_', length = 20)\n b'____________XXXX____'\n >>> flat({12: b'XXXX'}, filler = b'AB', length = 20)\n b'ABABABABABABXXXXABAB'\n\n Nested dictionaries also work as expected.\n\n >>> flat({4: {0: b'X', 4: b'Y'}})\n b'aaaaXaaaY'\n >>> fit({4: {4: b'XXXX'}})\n b'aaaabaaaXXXX'\n\n Negative indices are also supported, though this only works for integer\n keys.\n \n >>> flat({-4: b'x', -1: b'A', 0: b'0', 4: b'y'})\n b'xaaA0aaay'\n \"\"\"\n # HACK: To avoid circular imports we need to delay the import of `cyclic`\n from pwnlib.util import cyclic\n\n preprocessor = kwargs.pop('preprocessor', lambda x: None)\n filler = kwargs.pop('filler', cyclic.de_bruijn())\n length = kwargs.pop('length', None)\n stacklevel = kwargs.pop('stacklevel', 0)\n\n if isinstance(filler, (str, six.text_type)):\n filler = bytearray(_need_bytes(filler))\n\n if kwargs != {}:\n raise TypeError(\"flat() does not support argument %r\" % kwargs.popitem()[0])\n\n filler = iters.cycle(filler)\n out = _flat(args, preprocessor, make_packer(), filler, stacklevel + 2)\n\n if length:\n if len(out) > length:\n raise ValueError(\"flat(): Arguments does not fit within `length` (= %d) bytes\" % length)\n out += b''.join(p8(next(filler)) for _ in range(length - len(out)))\n\n return out\n\ndef fit(*args, **kwargs):\n \"\"\"Legacy alias for :func:`flat`\"\"\"\n kwargs['stacklevel'] = kwargs.get('stacklevel', 0) + 1\n return flat(*args, **kwargs)\n\n\"\"\"\n Generates a string from a dictionary mapping offsets to data to place at\n that offset.\n\n For each key-value pair in `pieces`, the key is either an offset or a byte\n sequence. In the latter case, the offset will be the lowest index at which\n the sequence occurs in `filler`. See examples below.\n\n Each piece of data is passed to :meth:`flat` along with the keyword\n arguments `word_size`, `endianness` and `sign`.\n\n Space between pieces of data is filled out using the iterable `filler`. The\n `n`'th byte in the output will be byte at index ``n % len(iterable)`` byte\n in `filler` if it has finite length or the byte at index `n` otherwise.\n\n If `length` is given, the output will padded with bytes from `filler` to be\n this size. If the output is longer than `length`, a :py:exc:`ValueError`\n exception is raised.\n\n If entries in `pieces` overlap, a :py:exc:`ValueError` exception is\n raised.\n\n Arguments:\n pieces: Offsets and values to output.\n length: The length of the output.\n filler: Iterable to use for padding.\n preprocessor (function): Gets called on every element to optionally\n transform the element before flattening. If :const:`None` is\n returned, then the original value is used.\n word_size (int): Word size of the converted integer (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\").\n sign (str): Signedness of the converted integer (False/True)\n\n Examples:\n\n \"\"\"\n\ndef signed(integer):\n return unpack(pack(integer), signed=True)\n\ndef unsigned(integer):\n return unpack(pack(integer))\n\ndef dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False):\n \"\"\"dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False) -> dst\n\n Inspired by the command line tool ``dd``, this function copies `count` byte\n values from offset `seek` in `src` to offset `skip` in `dst`. 
If `count` is\n 0, all of ``src[seek:]`` is copied.\n\n If `dst` is a mutable type it will be updated. Otherwise a new instance of\n the same type will be created. In either case the result is returned.\n\n `src` can be an iterable of characters or integers, a unicode string or a\n file object. If it is an iterable of integers, each integer must be in the\n range [0;255]. If it is a unicode string, its UTF-8 encoding will be used.\n\n The seek offset of file objects will be preserved.\n\n Arguments:\n dst: Supported types are :class:`file`, :class:`list`, :class:`tuple`,\n :class:`str`, :class:`bytearray` and :class:`unicode`.\n src: An iterable of byte values (characters or integers), a unicode\n string or a file object.\n count (int): How many bytes to copy. If `count` is 0 or larger than\n ``len(src[seek:])``, all bytes until the end of `src` are\n copied.\n skip (int): Offset in `dst` to copy to.\n seek (int): Offset in `src` to copy from.\n truncate (bool): If :const:`True`, `dst` is truncated at the last copied\n byte.\n\n Returns:\n A modified version of `dst`. If `dst` is a mutable type it will be\n modified in-place.\n\n Examples:\n >>> dd(tuple('Hello!'), b'?', skip = 5)\n ('H', 'e', 'l', 'l', 'o', b'?')\n >>> dd(list('Hello!'), (63,), skip = 5)\n ['H', 'e', 'l', 'l', 'o', b'?']\n >>> _ = open('/tmp/foo', 'w').write('A' * 10)\n >>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4).read()\n 'AAA\\\\x00\\\\x00\\\\x00\\\\x00AAA'\n >>> _ = open('/tmp/foo', 'w').write('A' * 10)\n >>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4, truncate = True).read()\n 'AAA\\\\x00\\\\x00\\\\x00\\\\x00'\n \"\"\"\n\n # Re-open file objects to make sure we have the mode right\n if hasattr(src, 'name'):\n src = open(src.name, 'rb')\n if hasattr(dst, 'name'):\n real_dst = dst\n dst = open(dst.name, 'rb+')\n\n # Special case: both `src` and `dst` are files, so we don't need to hold\n # everything in memory\n if hasattr(src, 'seek') and hasattr(dst, 'seek'):\n src.seek(seek)\n dst.seek(skip)\n n = 0\n if count:\n while n < count:\n s = src.read(min(count - n, 0x1000))\n if not s:\n break\n n += len(s)\n dst.write(s)\n else:\n while True:\n s = src.read(0x1000)\n if not s:\n break\n n += len(s)\n dst.write(s)\n if truncate:\n dst.truncate(skip + n)\n src.close()\n dst.close()\n return real_dst\n\n # Otherwise get `src` in canonical form, i.e. 
a string of at most `count`\n # bytes\n if isinstance(src, six.text_type):\n if count:\n # The only way to know where the `seek`th byte is, is to decode, but\n # we only need to decode up to the first `seek + count` code points\n src = src[:seek + count].encode('utf8')\n # The code points may result in more that `seek + count` bytes\n src = src[seek : seek + count]\n else:\n src = src.encode('utf8')[seek:]\n\n elif hasattr(src, 'seek'):\n src.seek(seek)\n src_ = b''\n if count:\n while len(src_) < count:\n s = src.read(count - len(src_))\n if not s:\n break\n src_ += s\n else:\n while True:\n s = src.read()\n if not s:\n break\n src_ += s\n src.close()\n src = src_\n\n elif isinstance(src, bytes):\n if count:\n src = src[seek : seek + count]\n else:\n src = src[seek:]\n\n elif hasattr(src, '__iter__'):\n src = src[seek:]\n src_ = b''\n for i, b in enumerate(src, seek):\n if count and i > count + seek:\n break\n if isinstance(b, bytes):\n src_ += b\n elif isinstance(b, six.integer_types):\n if b > 255 or b < 0:\n raise ValueError(\"dd(): Source value %d at index %d is not in range [0;255]\" % (b, i))\n src_ += _p8lu(b)\n else:\n raise TypeError(\"dd(): Unsupported `src` element type: %r\" % type(b))\n src = src_\n\n else:\n raise TypeError(\"dd(): Unsupported `src` type: %r\" % type(src))\n\n # If truncate, then where?\n if truncate:\n truncate = skip + len(src)\n\n # UTF-8 encode unicode `dst`\n if isinstance(dst, six.text_type):\n dst = dst.encode('utf8')\n utf8 = True\n else:\n utf8 = False\n\n # Match on the type of `dst`\n if hasattr(dst, 'seek'):\n dst.seek(skip)\n dst.write(src)\n if truncate:\n dst.truncate(truncate)\n dst.close()\n dst = real_dst\n\n elif isinstance(dst, (list, bytearray)):\n dst[skip : skip + len(src)] = list(map(p8, bytearray(src)))\n if truncate:\n while len(dst) > truncate:\n dst.pop()\n\n elif isinstance(dst, tuple):\n tail = dst[skip + len(src):]\n dst = dst[:skip] + tuple(map(p8, bytearray(src)))\n if not truncate:\n dst = dst + tail\n\n elif isinstance(dst, bytes):\n tail = dst[skip + len(src):]\n dst = dst[:skip] + src\n if not truncate:\n dst = dst + tail\n\n else:\n raise TypeError(\"dd(): Unsupported `dst` type: %r\" % type(dst))\n\n if utf8:\n dst = dst.decode('utf8')\n\n return dst\n\ndef _need_bytes(s, level=1, min_wrong=0):\n if isinstance(s, (bytes, bytearray)):\n return s # already bytes\n\n encoding = context.encoding\n errors = 'strict'\n worst = -1\n if encoding == 'auto':\n worst = s and max(map(ord, s)) or 0\n if worst > 255:\n encoding = 'UTF-8'\n errors = 'surrogateescape'\n elif worst > 127:\n encoding = 'ISO-8859-1'\n else:\n encoding = 'ASCII'\n\n if worst >= min_wrong:\n warnings.warn(\"Text is not bytes; assuming {}, no guarantees. See https://docs.pwntools.com/#bytes\"\n .format(encoding), BytesWarning, level + 2)\n return s.encode(encoding, errors)\n\ndef _need_text(s, level=1):\n if isinstance(s, (str, six.text_type)):\n return s # already text\n\n encoding = context.encoding\n errors = 'strict'\n if encoding == 'auto':\n for encoding in 'ASCII', 'UTF-8', 'ISO-8859-1':\n try:\n s = s.decode(encoding)\n except UnicodeDecodeError:\n pass\n else:\n break\n\n warnings.warn(\"Bytes is not text; assuming {}, no guarantees. 
See https://docs.pwntools.com/#bytes\"\n .format(encoding), BytesWarning, level + 2)\n return s.decode(encoding, errors)\n\ndef _encode(s):\n if isinstance(s, (bytes, bytearray)):\n return s # already bytes\n\n if context.encoding == 'auto':\n try:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return s.encode('utf-8', 'surrogateescape')\n return s.encode(context.encoding)\n\ndef _decode(b):\n if context.encoding == 'auto':\n try:\n return b.decode('utf-8')\n except UnicodeDecodeError:\n return b.decode('latin1')\n except AttributeError:\n return b\n return b.decode(context.encoding)\n\ndel op, size, end, sign\ndel name, routine, mod\n",
"path": "pwnlib/util/packing.py"
}
] | [
{
"content": " # -*- coding: utf-8 -*-\nr\"\"\"\nModule for packing and unpacking integers.\n\nSimplifies access to the standard ``struct.pack`` and ``struct.unpack``\nfunctions, and also adds support for packing/unpacking arbitrary-width\nintegers.\n\nThe packers are all context-aware for ``endian`` and ``signed`` arguments,\nthough they can be overridden in the parameters.\n\nExamples:\n\n >>> p8(0)\n b'\\x00'\n >>> p32(0xdeadbeef)\n b'\\xef\\xbe\\xad\\xde'\n >>> p32(0xdeadbeef, endian='big')\n b'\\xde\\xad\\xbe\\xef'\n >>> with context.local(endian='big'): p32(0xdeadbeef)\n b'\\xde\\xad\\xbe\\xef'\n\n Make a frozen packer, which does not change with context.\n\n >>> p=make_packer('all')\n >>> p(0xff)\n b'\\xff'\n >>> p(0x1ff)\n b'\\xff\\x01'\n >>> with context.local(endian='big'): print(repr(p(0x1ff)))\n b'\\xff\\x01'\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport collections\nimport six\nimport struct\nimport sys\nimport warnings\n\nfrom six.moves import range\n\nfrom pwnlib.context import LocalNoarchContext\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\n\nfrom pwnlib.util import iters\n\nmod = sys.modules[__name__]\nlog = getLogger(__name__)\n\ndef pack(number, word_size = None, endianness = None, sign = None, **kwargs):\n r\"\"\"pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str\n\n Packs arbitrary-sized integer.\n\n Word-size, endianness and signedness is done according to context.\n\n `word_size` can be any positive number or the string \"all\". Choosing the\n string \"all\" will output a string long enough to contain all the significant\n bits and thus be decodable by :func:`unpack`.\n\n `word_size` can be any positive number. The output will contain word_size/8\n rounded up number of bytes. 
If word_size is not a multiple of 8, it will be\n padded with zeroes up to a byte boundary.\n\n Arguments:\n number (int): Number to convert\n word_size (int): Word size of the converted integer or the string 'all' (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The packed number as a string.\n\n Examples:\n >>> pack(0x414243, 24, 'big', True)\n b'ABC'\n >>> pack(0x414243, 24, 'little', True)\n b'CBA'\n >>> pack(0x814243, 24, 'big', False)\n b'\\x81BC'\n >>> pack(0x814243, 24, 'big', True)\n Traceback (most recent call last):\n ...\n ValueError: pack(): number does not fit within word_size\n >>> pack(0x814243, 25, 'big', True)\n b'\\x00\\x81BC'\n >>> pack(-1, 'all', 'little', True)\n b'\\xff'\n >>> pack(-256, 'all', 'big', True)\n b'\\xff\\x00'\n >>> pack(0x0102030405, 'all', 'little', True)\n b'\\x05\\x04\\x03\\x02\\x01'\n >>> pack(-1)\n b'\\xff\\xff\\xff\\xff'\n >>> pack(0x80000000, 'all', 'big', True)\n b'\\x00\\x80\\x00\\x00\\x00'\n\"\"\"\n if sign is None and number < 0:\n sign = True\n\n if word_size != 'all':\n kwargs.setdefault('word_size', word_size)\n\n kwargs.setdefault('endianness', endianness)\n kwargs.setdefault('sign', sign)\n\n with context.local(**kwargs):\n # Lookup in context if not found\n word_size = 'all' if word_size == 'all' else context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if not isinstance(number, six.integer_types):\n raise ValueError(\"pack(): number must be of type (int,long) (got %r)\" % type(number))\n\n if not isinstance(sign, bool):\n raise ValueError(\"pack(): sign must be either True or False (got %r)\" % sign)\n\n if endianness not in ['little', 'big']:\n raise ValueError(\"pack(): endianness must be either 'little' or 'big' (got %r)\" % endianness)\n\n # Verify that word_size make sense\n if word_size == 'all':\n if number == 0:\n word_size = 8\n elif number > 0:\n if sign:\n word_size = (number.bit_length() | 7) + 1\n else:\n word_size = ((number.bit_length() - 1) | 7) + 1\n else:\n if not sign:\n raise ValueError(\"pack(): number does not fit within word_size\")\n word_size = ((number + 1).bit_length() | 7) + 1\n elif not isinstance(word_size, six.integer_types) or word_size <= 0:\n raise ValueError(\"pack(): word_size must be a positive integer or the string 'all'\")\n\n if sign:\n limit = 1 << (word_size-1)\n if not -limit <= number < limit:\n raise ValueError(\"pack(): number does not fit within word_size\")\n else:\n limit = 1 << word_size\n if not 0 <= number < limit:\n raise ValueError(\"pack(): number does not fit within word_size [%i, %r, %r]\" % (0, number, limit))\n\n # Normalize number and size now that we have verified them\n # From now on we can treat positive and negative numbers the same\n number = number & ((1 << word_size) - 1)\n byte_size = (word_size + 7) // 8\n\n out = []\n\n for _ in range(byte_size):\n out.append(_p8lu(number & 0xff))\n number = number >> 8\n\n if endianness == 'little':\n return b''.join(out)\n else:\n return b''.join(reversed(out))\n\n@LocalNoarchContext\ndef unpack(data, word_size = None):\n r\"\"\"unpack(data, word_size = None, endianness = None, sign = None, **kwargs) -> int\n\n Packs arbitrary-sized integer.\n\n Word-size, endianness and signedness is done according to context.\n\n `word_size` can be any positive number or the string \"all\". 
Choosing the\n string \"all\" is equivalent to ``len(data)*8``.\n\n If `word_size` is not a multiple of 8, then the bits used for padding\n are discarded.\n\n Arguments:\n number (int): String to convert\n word_size (int): Word size of the converted integer or the string \"all\" (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The unpacked number.\n\n Examples:\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='little', sign=False))\n '0x55aa'\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=False))\n '0xaa55'\n >>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=True))\n '-0x55ab'\n >>> hex(unpack(b'\\xaa\\x55', 15, endian='big', sign=True))\n '0x2a55'\n >>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='little', sign=True))\n '0x302ff'\n >>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='big', sign=True))\n '-0xfdfd'\n \"\"\"\n\n # Lookup in context if not found\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n data = _need_bytes(data, 2)\n\n # Verify that word_size make sense\n if word_size == 'all':\n word_size = len(data) * 8\n elif not isinstance(word_size, six.integer_types) or word_size <= 0:\n raise ValueError(\"unpack(): word_size must be a positive integer or the string 'all'\")\n\n byte_size = (word_size + 7) // 8\n\n if byte_size != len(data):\n raise ValueError(\"unpack(): data must have length %d, since word_size was %d\" % (byte_size, word_size))\n\n number = 0\n\n if endianness == \"little\":\n data = reversed(data)\n data = bytearray(data)\n\n for c in data:\n number = (number << 8) + c\n\n number = number & ((1 << word_size) - 1)\n\n if not sign:\n return int(number)\n\n signbit = number & (1 << (word_size-1))\n return int(number - 2*signbit)\n\n@LocalNoarchContext\ndef unpack_many(data, word_size = None):\n \"\"\"unpack(data, word_size = None, endianness = None, sign = None) -> int list\n\n Splits `data` into groups of ``word_size//8`` bytes and calls :func:`unpack` on each group. Returns a list of the results.\n\n `word_size` must be a multiple of `8` or the string \"all\". 
In the latter case a singleton list will always be returned.\n\n Args\n number (int): String to convert\n word_size (int): Word size of the converted integers or the string \"all\" (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (False/True)\n kwargs: Anything that can be passed to context.local\n\n Returns:\n The unpacked numbers.\n\n Examples:\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='little', sign=False)))\n ['0x55aa', '0x33cc']\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='big', sign=False)))\n ['0xaa55', '0xcc33']\n >>> list(map(hex, unpack_many(b'\\\\xaa\\\\x55\\\\xcc\\\\x33', 16, endian='big', sign=True)))\n ['-0x55ab', '-0x33cd']\n >>> list(map(hex, unpack_many(b'\\\\xff\\\\x02\\\\x03', 'all', endian='little', sign=True)))\n ['0x302ff']\n >>> list(map(hex, unpack_many(b'\\\\xff\\\\x02\\\\x03', 'all', endian='big', sign=True)))\n ['-0xfdfd']\n \"\"\"\n # Lookup in context if None\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if word_size == 'all':\n return [unpack(data, word_size)]\n\n # Currently we only group on byte boundaries\n if word_size % 8 != 0:\n raise ValueError(\"unpack_many(): word_size must be a multiple of 8\")\n\n out = []\n n = word_size // 8\n for i in range(0, len(data), n):\n out.append(unpack(data[i:i+n], word_size))\n\n return list(map(int, out))\n\n\n\n#\n# Make individual packers, e.g. _p8lu\n#\nops = ['p','u']\nsizes = {8:'b', 16:'h', 32:'i', 64:'q'}\nends = ['b','l']\nsigns = ['s','u']\n\nreturn_types = {'p': 'bytes', 'u': 'int'}\nop_verbs = {'p': 'pack', 'u': 'unpack'}\narg_doc = {'p': 'number (int): Number to convert',\n 'u': 'data (bytes): Byte string to convert'}\nrv_doc = {'p': 'The packed number as a byte string',\n 'u': 'The unpacked number'}\n\n\ndef make_single(op,size,end,sign):\n name = '_%s%s%s%s' % (op, size, end, sign)\n fmt = sizes[size]\n end = '>' if end == 'b' else '<'\n\n if sign == 'u':\n fmt = fmt.upper()\n fmt = end+fmt\n\n struct_op = getattr(struct.Struct(fmt), op_verbs[op])\n if op == 'u':\n def routine(data, stacklevel=1):\n data = _need_bytes(data, stacklevel)\n return struct_op(data)[0]\n else:\n def routine(data, stacklevel=None):\n return struct_op(data)\n routine.__name__ = routine.__qualname__ = name\n\n return name, routine\n\n\nfor op,size,end,sign in iters.product(ops, sizes, ends, signs):\n name, routine = make_single(op,size,end,sign)\n setattr(mod, name, routine)\n\n\n#\n# Make normal user-oriented packers, e.g. p8\n#\ndef make_multi(op, size):\n\n name = \"%s%s\" % (op,size)\n\n ls = getattr(mod, \"_%sls\" % (name))\n lu = getattr(mod, \"_%slu\" % (name))\n bs = getattr(mod, \"_%sbs\" % (name))\n bu = getattr(mod, \"_%sbu\" % (name))\n\n @LocalNoarchContext\n def routine(number):\n endian = context.endian\n signed = context.signed\n return {(\"little\", True ): ls,\n (\"little\", False): lu,\n (\"big\", True ): bs,\n (\"big\", False): bu}[endian, signed](number, 3)\n\n routine.__name__ = name\n routine.__doc__ = \"\"\"%s%s(number, sign, endian, ...) 
-> %s\n\n %ss an %s-bit integer\n\n Arguments:\n %s\n endianness (str): Endianness of the converted integer (\"little\"/\"big\")\n sign (str): Signedness of the converted integer (\"unsigned\"/\"signed\")\n kwargs (dict): Arguments passed to context.local(), such as\n ``endian`` or ``signed``.\n\n Returns:\n %s\n \"\"\" % (op, size, return_types[op], op_verbs[op].title(), size, arg_doc[op], rv_doc[op])\n\n return name, routine\n\n\nfor op,size in iters.product(ops, sizes):\n name, routine = make_multi(op,size)\n setattr(mod, name, routine)\n\ndef make_packer(word_size = None, sign = None, **kwargs):\n \"\"\"make_packer(word_size = None, endianness = None, sign = None) -> number → str\n\n Creates a packer by \"freezing\" the given arguments.\n\n Semantically calling ``make_packer(w, e, s)(data)`` is equivalent to calling\n ``pack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however\n faster to call this function, since it will then use a specialized version.\n\n Arguments:\n word_size (int): The word size to be baked into the returned packer or the string all (in bits).\n endianness (str): The endianness to be baked into the returned packer. (\"little\"/\"big\")\n sign (str): The signness to be baked into the returned packer. (\"unsigned\"/\"signed\")\n kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)\n\n Returns:\n A function, which takes a single argument in the form of a number and returns a string\n of that number in a packed form.\n\n Examples:\n >>> p = make_packer(32, endian='little', sign='unsigned')\n >>> p\n <function _p32lu at 0x...>\n >>> p(42)\n b'*\\\\x00\\\\x00\\\\x00'\n >>> p(-1)\n Traceback (most recent call last):\n ...\n error: integer out of range for 'I' format code\n >>> make_packer(33, endian='little', sign='unsigned')\n <function ...<lambda> at 0x...>\n\"\"\"\n with context.local(sign=sign, **kwargs):\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = sign if sign is None else context.sign\n\n if word_size in [8, 16, 32, 64]:\n packer = {\n (8, 0, 0): _p8lu,\n (8, 0, 1): _p8ls,\n (8, 1, 0): _p8bu,\n (8, 1, 1): _p8bs,\n (16, 0, 0): _p16lu,\n (16, 0, 1): _p16ls,\n (16, 1, 0): _p16bu,\n (16, 1, 1): _p16bs,\n (32, 0, 0): _p32lu,\n (32, 0, 1): _p32ls,\n (32, 1, 0): _p32bu,\n (32, 1, 1): _p32bs,\n (64, 0, 0): _p64lu,\n (64, 0, 1): _p64ls,\n (64, 1, 0): _p64bu,\n (64, 1, 1): _p64bs,\n }.get((word_size, {'big': 1, 'little': 0}[endianness], sign))\n\n if packer:\n return packer\n\n return lambda number: pack(number, word_size, endianness, sign)\n\n@LocalNoarchContext\ndef make_unpacker(word_size = None, endianness = None, sign = None, **kwargs):\n \"\"\"make_unpacker(word_size = None, endianness = None, sign = None, **kwargs) -> str → number\n\n Creates a unpacker by \"freezing\" the given arguments.\n\n Semantically calling ``make_unpacker(w, e, s)(data)`` is equivalent to calling\n ``unpack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however\n faster to call this function, since it will then use a specialized version.\n\n Arguments:\n word_size (int): The word size to be baked into the returned packer (in bits).\n endianness (str): The endianness to be baked into the returned packer. (\"little\"/\"big\")\n sign (str): The signness to be baked into the returned packer. (\"unsigned\"/\"signed\")\n kwargs: Additional context flags, for setting by alias (e.g. 
``endian=`` rather than index)\n\n Returns:\n A function, which takes a single argument in the form of a string and returns a number\n of that string in an unpacked form.\n\n Examples:\n >>> u = make_unpacker(32, endian='little', sign='unsigned')\n >>> u\n <function _u32lu at 0x...>\n >>> hex(u(b'/bin'))\n '0x6e69622f'\n >>> u(b'abcde')\n Traceback (most recent call last):\n ...\n error: unpack requires a string argument of length 4\n >>> make_unpacker(33, endian='little', sign='unsigned')\n <function ...<lambda> at 0x...>\n\"\"\"\n word_size = word_size or context.word_size\n endianness = context.endianness\n sign = context.sign\n\n if word_size in [8, 16, 32, 64]:\n endianness = 1 if endianness == 'big' else 0\n\n return {\n (8, 0, 0): _u8lu,\n (8, 0, 1): _u8ls,\n (8, 1, 0): _u8bu,\n (8, 1, 1): _u8bs,\n (16, 0, 0): _u16lu,\n (16, 0, 1): _u16ls,\n (16, 1, 0): _u16bu,\n (16, 1, 1): _u16bs,\n (32, 0, 0): _u32lu,\n (32, 0, 1): _u32ls,\n (32, 1, 0): _u32bu,\n (32, 1, 1): _u32bs,\n (64, 0, 0): _u64lu,\n (64, 0, 1): _u64ls,\n (64, 1, 0): _u64bu,\n (64, 1, 1): _u64bs,\n }[word_size, endianness, sign]\n else:\n return lambda number: unpack(number, word_size, endianness, sign)\n\ndef _fit(pieces, preprocessor, packer, filler, stacklevel=1):\n\n # Pulls bytes from `filler` and adds them to `pad` until it ends in `key`.\n # Returns the index of `key` in `pad`.\n pad = bytearray()\n def fill(key):\n key = bytearray(key)\n offset = pad.find(key)\n while offset == -1:\n pad.append(next(filler))\n offset = pad.find(key, -len(key))\n return offset\n\n # Key conversion:\n # - convert str/unicode keys to offsets\n # - convert large int (no null-bytes in a machine word) keys to offsets\n pieces_ = dict()\n large_key = 2**(context.word_size-8)\n for k, v in pieces.items():\n if isinstance(k, six.integer_types):\n if k >= large_key:\n k = fill(pack(k))\n elif isinstance(k, (six.text_type, bytearray, bytes)):\n k = fill(_need_bytes(k, stacklevel, 0x80))\n else:\n raise TypeError(\"flat(): offset must be of type int or str, but got '%s'\" % type(k))\n if k in pieces_:\n raise ValueError(\"flag(): multiple values at offset %d\" % k)\n pieces_[k] = v\n pieces = pieces_\n\n # We must \"roll back\" `filler` so each recursive call to `_flat` gets it in\n # the right position\n filler = iters.chain(pad, filler)\n\n # Build output\n out = b''\n\n # Negative indices need to be removed and then re-submitted\n negative = {k:v for k,v in pieces.items() if isinstance(k, int) and k<0}\n\n for k in negative:\n del pieces[k]\n\n # Positive output\n for k, v in sorted(pieces.items()):\n if k < len(out):\n raise ValueError(\"flat(): data at offset %d overlaps with previous data which ends at offset %d\" % (k, len(out)))\n\n # Fill up to offset\n while len(out) < k:\n out += p8(next(filler))\n\n # Recursively flatten data\n out += _flat([v], preprocessor, packer, filler, stacklevel + 1)\n\n # Now do negative indices\n out_negative = b''\n if negative:\n most_negative = min(negative.keys())\n for k, v in sorted(negative.items()):\n k += -most_negative\n\n if k < len(out_negative):\n raise ValueError(\"flat(): data at offset %d overlaps with previous data which ends at offset %d\" % (k, len(out)))\n\n # Fill up to offset\n while len(out_negative) < k:\n out_negative += p8(next(filler))\n\n # Recursively flatten data\n out_negative += _flat([v], preprocessor, packer, filler, stacklevel + 1)\n\n return filler, out_negative + out\n\ndef _flat(args, preprocessor, packer, filler, stacklevel=1):\n out = []\n for arg in args:\n\n if not 
isinstance(arg, (list, tuple, dict)):\n arg_ = preprocessor(arg)\n if arg_ is not None:\n arg = arg_\n\n if hasattr(arg, '__flat__'):\n val = arg.__flat__()\n elif isinstance(arg, (list, tuple)):\n val = _flat(arg, preprocessor, packer, filler, stacklevel + 1)\n elif isinstance(arg, dict):\n filler, val = _fit(arg, preprocessor, packer, filler, stacklevel + 1)\n elif isinstance(arg, bytes):\n val = arg\n elif isinstance(arg, six.text_type):\n val = _need_bytes(arg, stacklevel + 1)\n elif isinstance(arg, six.integer_types):\n val = packer(arg)\n elif isinstance(arg, bytearray):\n val = bytes(arg)\n else:\n raise ValueError(\"flat(): Flat does not support values of type %s\" % type(arg))\n\n out.append(val)\n\n # Advance `filler` for \"non-recursive\" values\n if not isinstance(arg, (list, tuple, dict)):\n for _ in range(len(val)):\n next(filler)\n\n return b''.join(out)\n\n@LocalNoarchContext\ndef flat(*args, **kwargs):\n r\"\"\"flat(\\*args, preprocessor = None, length = None, filler = de_bruijn(),\n word_size = None, endianness = None, sign = None) -> str\n\n Flattens the arguments into a string.\n\n This function takes an arbitrary number of arbitrarily nested lists, tuples\n and dictionaries. It will then find every string and number inside those\n and flatten them out. Strings are inserted directly while numbers are\n packed using the :func:`pack` function. Unicode strings are UTF-8 encoded.\n\n Dictionary keys give offsets at which to place the corresponding values\n (which are recursively flattened). Offsets are relative to where the\n flattened dictionary occurs in the output (i.e. ``{0: 'foo'}`` is equivalent\n to ``'foo'``). Offsets can be integers, unicode strings or regular strings.\n Integer offsets >= ``2**(word_size-8)`` are converted to a string using\n :func:`pack`. Unicode strings are UTF-8 encoded. After these conversions\n offsets are either integers or strings. In the latter case, the offset will\n be the lowest index at which the string occurs in `filler`. See examples\n below.\n\n Space between pieces of data is filled out using the iterable `filler`. The\n `n`'th byte in the output will be byte at index ``n % len(iterable)`` byte\n in `filler` if it has finite length or the byte at index `n` otherwise.\n\n If `length` is given, the output will be padded with bytes from `filler` to\n be this size. If the output is longer than `length`, a :py:exc:`ValueError`\n exception is raised.\n\n The three kwargs `word_size`, `endianness` and `sign` will default to using\n values in :mod:`pwnlib.context` if not specified as an argument.\n\n Arguments:\n args: Values to flatten\n preprocessor (function): Gets called on every element to optionally\n transform the element before flattening. 
If :const:`None` is\n returned, then the original value is used.\n length: The length of the output.\n filler: Iterable to use for padding.\n word_size (int): Word size of the converted integer.\n endianness (str): Endianness of the converted integer (\"little\"/\"big\").\n sign (str): Signedness of the converted integer (False/True)\n\n Examples:\n\n (Test setup, please ignore)\n \n >>> context.clear()\n\n Basic usage of :meth:`flat` works similar to the pack() routines.\n\n >>> flat(4)\n b'\\x04\\x00\\x00\\x00'\n\n :meth:`flat` works with strings, bytes, lists, and dictionaries.\n\n >>> flat(b'X')\n b'X'\n >>> flat([1,2,3])\n b'\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'\n >>> flat({4:b'X'})\n b'aaaaX'\n\n :meth:`.flat` flattens all of the values provided, and allows nested lists\n and dictionaries.\n\n >>> flat([{4:b'X'}] * 2)\n b'aaaaXaaacX'\n >>> flat([[[[[[[[[1]]]], 2]]]]])\n b'\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00'\n\n You can also provide additional arguments like endianness, word-size, and\n whether the values are treated as signed or not.\n\n >>> flat(1, b\"test\", [[[b\"AB\"]*2]*3], endianness = 'little', word_size = 16, sign = False)\n b'\\x01\\x00testABABABABABAB'\n\n A preprocessor function can be provided in order to modify the values in-flight.\n This example converts increments each value by 1, then converts to a byte string.\n\n >>> flat([1, [2, 3]], preprocessor = lambda x: str(x+1).encode())\n b'234'\n\n Using dictionaries is a fast way to get specific values at specific offsets,\n without having to do ``data += \"foo\"`` repeatedly.\n\n >>> flat({12: 0x41414141,\n ... 24: b'Hello',\n ... })\n b'aaaabaaacaaaAAAAeaaafaaaHello'\n\n Dictionary usage permits directly using values derived from :func:`.cyclic`.\n See :func:`.cyclic`, :function:`pwnlib.context.context.cyclic_alphabet`, and :data:`.context.cyclic_size`\n for more options. \n\n The cyclic pattern can be provided as either the text or hexadecimal offset.\n\n >>> flat({ 0x61616162: b'X'})\n b'aaaaX'\n >>> flat({'baaa': b'X'})\n b'aaaaX'\n\n Fields do not have to be in linear order, and can be freely mixed.\n This also works with cyclic offsets.\n\n >>> flat({2: b'A', 0:b'B'})\n b'BaA'\n >>> flat({0x61616161: b'x', 0x61616162: b'y'})\n b'xaaay'\n >>> flat({0x61616162: b'y', 0x61616161: b'x'})\n b'xaaay'\n\n Fields do not have to be in order, and can be freely mixed.\n\n >>> flat({'caaa': b'XXXX', 16: b'\\x41', 20: 0xdeadbeef})\n b'aaaabaaaXXXXdaaaAaaa\\xef\\xbe\\xad\\xde'\n >>> flat({ 8: [0x41414141, 0x42424242], 20: b'CCCC'})\n b'aaaabaaaAAAABBBBeaaaCCCC'\n >>> fit({\n ... 0x61616161: b'a',\n ... 1: b'b',\n ... 0x61616161+2: b'c',\n ... 3: b'd',\n ... 
})\n b'abadbaaac'\n\n By default, gaps in the data are filled in with the :meth:`.cyclic` pattern.\n You can customize this by providing an iterable or method for the ``filler``\n argument.\n\n >>> flat({12: b'XXXX'}, filler = b'_', length = 20)\n b'____________XXXX____'\n >>> flat({12: b'XXXX'}, filler = b'AB', length = 20)\n b'ABABABABABABXXXXABAB'\n\n Nested dictionaries also work as expected.\n\n >>> flat({4: {0: b'X', 4: b'Y'}})\n b'aaaaXaaaY'\n >>> fit({4: {4: b'XXXX'}})\n b'aaaabaaaXXXX'\n\n Negative indices are also supported, though this only works for integer\n keys.\n \n >>> flat({-4: b'x', -1: b'A', 0: b'0', 4: b'y'})\n b'xaaA0aaay'\n \"\"\"\n # HACK: To avoid circular imports we need to delay the import of `cyclic`\n from pwnlib.util import cyclic\n\n preprocessor = kwargs.pop('preprocessor', lambda x: None)\n filler = kwargs.pop('filler', cyclic.de_bruijn())\n length = kwargs.pop('length', None)\n stacklevel = kwargs.pop('stacklevel', 0)\n\n if isinstance(filler, (str, six.text_type)):\n filler = bytearray(_need_bytes(filler))\n\n if kwargs != {}:\n raise TypeError(\"flat() does not support argument %r\" % kwargs.popitem()[0])\n\n filler = iters.cycle(filler)\n out = _flat(args, preprocessor, make_packer(), filler, stacklevel + 2)\n\n if length:\n if len(out) > length:\n raise ValueError(\"flat(): Arguments does not fit within `length` (= %d) bytes\" % length)\n out += b''.join(p8(next(filler)) for _ in range(length - len(out)))\n\n return out\n\ndef fit(*args, **kwargs):\n \"\"\"Legacy alias for :func:`flat`\"\"\"\n kwargs['stacklevel'] = kwargs.get('stacklevel', 0) + 1\n return flat(*args, **kwargs)\n\n\"\"\"\n Generates a string from a dictionary mapping offsets to data to place at\n that offset.\n\n For each key-value pair in `pieces`, the key is either an offset or a byte\n sequence. In the latter case, the offset will be the lowest index at which\n the sequence occurs in `filler`. See examples below.\n\n Each piece of data is passed to :meth:`flat` along with the keyword\n arguments `word_size`, `endianness` and `sign`.\n\n Space between pieces of data is filled out using the iterable `filler`. The\n `n`'th byte in the output will be byte at index ``n % len(iterable)`` byte\n in `filler` if it has finite length or the byte at index `n` otherwise.\n\n If `length` is given, the output will padded with bytes from `filler` to be\n this size. If the output is longer than `length`, a :py:exc:`ValueError`\n exception is raised.\n\n If entries in `pieces` overlap, a :py:exc:`ValueError` exception is\n raised.\n\n Arguments:\n pieces: Offsets and values to output.\n length: The length of the output.\n filler: Iterable to use for padding.\n preprocessor (function): Gets called on every element to optionally\n transform the element before flattening. If :const:`None` is\n returned, then the original value is used.\n word_size (int): Word size of the converted integer (in bits).\n endianness (str): Endianness of the converted integer (\"little\"/\"big\").\n sign (str): Signedness of the converted integer (False/True)\n\n Examples:\n\n \"\"\"\n\ndef signed(integer):\n return unpack(pack(integer), signed=True)\n\ndef unsigned(integer):\n return unpack(pack(integer))\n\ndef dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False):\n \"\"\"dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False) -> dst\n\n Inspired by the command line tool ``dd``, this function copies `count` byte\n values from offset `seek` in `src` to offset `skip` in `dst`. 
If `count` is\n 0, all of ``src[seek:]`` is copied.\n\n If `dst` is a mutable type it will be updated. Otherwise a new instance of\n the same type will be created. In either case the result is returned.\n\n `src` can be an iterable of characters or integers, a unicode string or a\n file object. If it is an iterable of integers, each integer must be in the\n range [0;255]. If it is a unicode string, its UTF-8 encoding will be used.\n\n The seek offset of file objects will be preserved.\n\n Arguments:\n dst: Supported types are :class:`file`, :class:`list`, :class:`tuple`,\n :class:`str`, :class:`bytearray` and :class:`unicode`.\n src: An iterable of byte values (characters or integers), a unicode\n string or a file object.\n count (int): How many bytes to copy. If `count` is 0 or larger than\n ``len(src[seek:])``, all bytes until the end of `src` are\n copied.\n skip (int): Offset in `dst` to copy to.\n seek (int): Offset in `src` to copy from.\n truncate (bool): If :const:`True`, `dst` is truncated at the last copied\n byte.\n\n Returns:\n A modified version of `dst`. If `dst` is a mutable type it will be\n modified in-place.\n\n Examples:\n >>> dd(tuple('Hello!'), b'?', skip = 5)\n ('H', 'e', 'l', 'l', 'o', b'?')\n >>> dd(list('Hello!'), (63,), skip = 5)\n ['H', 'e', 'l', 'l', 'o', b'?']\n >>> _ = open('/tmp/foo', 'w').write('A' * 10)\n >>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4).read()\n 'AAA\\\\x00\\\\x00\\\\x00\\\\x00AAA'\n >>> _ = open('/tmp/foo', 'w').write('A' * 10)\n >>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4, truncate = True).read()\n 'AAA\\\\x00\\\\x00\\\\x00\\\\x00'\n \"\"\"\n\n # Re-open file objects to make sure we have the mode right\n if hasattr(src, 'name'):\n src = open(src.name, 'rb')\n if hasattr(dst, 'name'):\n real_dst = dst\n dst = open(dst.name, 'rb+')\n\n # Special case: both `src` and `dst` are files, so we don't need to hold\n # everything in memory\n if hasattr(src, 'seek') and hasattr(dst, 'seek'):\n src.seek(seek)\n dst.seek(skip)\n n = 0\n if count:\n while n < count:\n s = src.read(min(count - n, 0x1000))\n if not s:\n break\n n += len(s)\n dst.write(s)\n else:\n while True:\n s = src.read(0x1000)\n if not s:\n break\n n += len(s)\n dst.write(s)\n if truncate:\n dst.truncate(skip + n)\n src.close()\n dst.close()\n return real_dst\n\n # Otherwise get `src` in canonical form, i.e. 
a string of at most `count`\n # bytes\n if isinstance(src, six.text_type):\n if count:\n # The only way to know where the `seek`th byte is, is to decode, but\n # we only need to decode up to the first `seek + count` code points\n src = src[:seek + count].encode('utf8')\n # The code points may result in more that `seek + count` bytes\n src = src[seek : seek + count]\n else:\n src = src.encode('utf8')[seek:]\n\n elif hasattr(src, 'seek'):\n src.seek(seek)\n src_ = b''\n if count:\n while len(src_) < count:\n s = src.read(count - len(src_))\n if not s:\n break\n src_ += s\n else:\n while True:\n s = src.read()\n if not s:\n break\n src_ += s\n src.close()\n src = src_\n\n elif isinstance(src, bytes):\n if count:\n src = src[seek : seek + count]\n else:\n src = src[seek:]\n\n elif hasattr(src, '__iter__'):\n src = src[seek:]\n src_ = b''\n for i, b in enumerate(src, seek):\n if count and i > count + seek:\n break\n if isinstance(b, bytes):\n src_ += b\n elif isinstance(b, six.integer_types):\n if b > 255 or b < 0:\n raise ValueError(\"dd(): Source value %d at index %d is not in range [0;255]\" % (b, i))\n src_ += _p8lu(b)\n else:\n raise TypeError(\"dd(): Unsupported `src` element type: %r\" % type(b))\n src = src_\n\n else:\n raise TypeError(\"dd(): Unsupported `src` type: %r\" % type(src))\n\n # If truncate, then where?\n if truncate:\n truncate = skip + len(src)\n\n # UTF-8 encode unicode `dst`\n if isinstance(dst, six.text_type):\n dst = dst.encode('utf8')\n utf8 = True\n else:\n utf8 = False\n\n # Match on the type of `dst`\n if hasattr(dst, 'seek'):\n dst.seek(skip)\n dst.write(src)\n if truncate:\n dst.truncate(truncate)\n dst.close()\n dst = real_dst\n\n elif isinstance(dst, (list, bytearray)):\n dst[skip : skip + len(src)] = list(map(p8, bytearray(src)))\n if truncate:\n while len(dst) > truncate:\n dst.pop()\n\n elif isinstance(dst, tuple):\n tail = dst[skip + len(src):]\n dst = dst[:skip] + tuple(map(p8, bytearray(src)))\n if not truncate:\n dst = dst + tail\n\n elif isinstance(dst, bytes):\n tail = dst[skip + len(src):]\n dst = dst[:skip] + src\n if not truncate:\n dst = dst + tail\n\n else:\n raise TypeError(\"dd(): Unsupported `dst` type: %r\" % type(dst))\n\n if utf8:\n dst = dst.decode('utf8')\n\n return dst\n\ndef _need_bytes(s, level=1, min_wrong=0):\n if isinstance(s, (bytes, bytearray)):\n return s # already bytes\n\n encoding = context.encoding\n errors = 'strict'\n worst = -1\n if encoding == 'auto':\n worst = s and max(map(ord, s)) or 0\n if worst > 255:\n encoding = 'UTF-8'\n errors = 'surrogateescape'\n elif worst > 127:\n encoding = 'ISO-8859-1'\n else:\n encoding = 'ASCII'\n\n if worst >= min_wrong:\n warnings.warn(\"Text is not bytes; assuming {}, no guarantees. See https://docs.pwntools.com/#bytes\"\n .format(encoding), BytesWarning, level + 2)\n return s.encode(encoding, errors)\n\ndef _need_text(s, level=1):\n if isinstance(s, (str, six.text_type)):\n return s # already text\n\n encoding = context.encoding\n errors = 'strict'\n if encoding == 'auto':\n for encoding in 'ASCII', 'UTF-8', 'ISO-8859-1':\n try:\n s.decode(encoding)\n except UnicodeDecodeError:\n pass\n else:\n break\n\n warnings.warn(\"Bytes is not text; assuming {}, no guarantees. 
See https://docs.pwntools.com/#bytes\"\n .format(encoding), BytesWarning, level + 2)\n return s.decode(encoding, errors)\n\ndef _encode(s):\n if isinstance(s, (bytes, bytearray)):\n return s # already bytes\n\n if context.encoding == 'auto':\n try:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return s.encode('utf-8', 'surrogateescape')\n return s.encode(context.encoding)\n\ndef _decode(b):\n if context.encoding == 'auto':\n try:\n return b.decode('utf-8')\n except UnicodeDecodeError:\n return b.decode('latin1')\n except AttributeError:\n return b\n return b.decode(context.encoding)\n\ndel op, size, end, sign\ndel name, routine, mod\n",
"path": "pwnlib/util/packing.py"
}
] | diff --git a/pwnlib/util/packing.py b/pwnlib/util/packing.py
index 7638865a9..9af06bfc8 100644
--- a/pwnlib/util/packing.py
+++ b/pwnlib/util/packing.py
@@ -1040,7 +1040,7 @@ def _need_text(s, level=1):
if encoding == 'auto':
for encoding in 'ASCII', 'UTF-8', 'ISO-8859-1':
try:
- s = s.decode(encoding)
+ s.decode(encoding)
except UnicodeDecodeError:
pass
else:
|
cookiecutter__cookiecutter-753 | Bug for replay feature from pwd
Running the following command inside of a template repo:
`$ cookiecutter -o tmp .`
will cause `replay.dump` to write files like this:
`~/.cookiecutter_replay/..json`
Identified by @eliasdorneles
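For illustration, a minimal sketch (plain `os.path`, not cookiecutter code) of why passing `.` degenerates into a `..json` replay file: `os.path.basename('.')` is simply `'.'`, so the template name becomes a dot; resolving the path to an absolute one first recovers the real directory name.

```py
import os

template = '.'  # what the user passed on the command line

print(os.path.basename(template))
# '.'  -> a replay file named '..json'

print(os.path.basename(os.path.abspath(template)))
# e.g. 'fake-repo-tmpl' -> a replay file named 'fake-repo-tmpl.json'
```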
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = 
load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n",
"path": "cookiecutter/main.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(os.path.abspath(template))\n\n if replay:\n context = 
load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n",
"path": "cookiecutter/main.py"
}
] | diff --git a/cookiecutter/main.py b/cookiecutter/main.py
index d8ff7b6c7..0900142cd 100644
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -116,7 +116,7 @@ def cookiecutter(
'The repository {0} could not be located.'.format(template)
)
- template_name = os.path.basename(template)
+ template_name = os.path.basename(os.path.abspath(template))
if replay:
context = load(config_dict['replay_dir'], template_name)
diff --git a/tests/test_main.py b/tests/test_main.py
index 56fc6bfc1..f25ceff56 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -140,3 +140,59 @@ def test_cookiecutter_repository_url_should_clone(
)
assert os.path.isdir(project_dir)
+
+
+def test_replay_dump_template_name(
+ monkeypatch, mocker, user_config_data, user_config_file):
+ """Check that replay_dump is called with a valid template_name that is
+ not a relative path.
+
+ Otherwise files such as ``..json`` are created, which are not just cryptic
+ but also later mistaken for replay files of other templates if invoked with
+ '.' and '--replay'.
+
+ Change the current working directory temporarily to 'tests/fake-repo-tmpl'
+ for this test and call cookiecutter with '.' for the target template.
+ """
+ monkeypatch.chdir('tests/fake-repo-tmpl')
+
+ mock_replay_dump = mocker.patch('cookiecutter.main.dump')
+ mocker.patch('cookiecutter.main.generate_files')
+
+ cookiecutter(
+ '.',
+ no_input=True,
+ replay=False,
+ config_file=user_config_file,
+ )
+
+ mock_replay_dump.assert_called_once_with(
+ user_config_data['replay_dir'],
+ 'fake-repo-tmpl',
+ mocker.ANY,
+ )
+
+
+def test_replay_load_template_name(
+ monkeypatch, mocker, user_config_data, user_config_file):
+ """Check that replay_load is called with a valid template_name that is
+ not a relative path.
+
+ Change the current working directory temporarily to 'tests/fake-repo-tmpl'
+ for this test and call cookiecutter with '.' for the target template.
+ """
+ monkeypatch.chdir('tests/fake-repo-tmpl')
+
+ mock_replay_load = mocker.patch('cookiecutter.main.load')
+ mocker.patch('cookiecutter.main.generate_files')
+
+ cookiecutter(
+ '.',
+ replay=True,
+ config_file=user_config_file,
+ )
+
+ mock_replay_load.assert_called_once_with(
+ user_config_data['replay_dir'],
+ 'fake-repo-tmpl',
+ )
|
aws-cloudformation__cfn-lint-2386 | E0002 parsing I3013 on AWS::RDS::DBInstance if Engine is a Ref
### CloudFormation Lint Version
0.65.0
### What operating system are you using?
Ubuntu 22.04
### Describe the bug
A cfn-lint exception is raised while evaluating the I3013 rule.
The trigger seems to be the presence of a reference as the value of the "Engine" resource property (see the sketch below the reproduction template).



### Expected behavior
No error should be reported if `Engine: !Ref Something` is used.
### Reproduction template
```yaml
---
AWSTemplateFormatVersion: 2010-09-09
Parameters:
Engine:
Description: DB Engine
Type: String
AllowedValues:
- aurora-mysql
- aurora-postgresql
Resources:
DbCluster:
Type: AWS::RDS::DBCluster
DeletionPolicy: Snapshot
UpdateReplacePolicy: Retain
Properties:
DBClusterIdentifier: FooBar
Engine: !Ref Engine
StorageEncrypted: true
## XXX Other properties removed for brevity
DbWriterInstance:
Type: AWS::RDS::DBInstance
DeletionPolicy: Snapshot
UpdateReplacePolicy: Retain
Properties:
DBClusterIdentifier: !Ref DbCluster
Engine: !Ref Engine # XXX here a cfn-lint bug. Allowed parameter values are "aurora-postgresql" and "aurora-mysql"
PubliclyAccessible: false
```
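A minimal sketch of the suspected failure mode (an assumption based on how CloudFormation intrinsics are represented after parsing, not a trace through cfn-lint itself): with `Engine: !Ref Engine`, the property value reaches the rule as a dict such as `{'Ref': 'Engine'}`, and calling a compiled regex's `match` on a non-string raises `TypeError`, which then surfaces as the E0002 rule-processing error.

```py
import re

engine_regex = re.compile('^((?!aurora).)*$')  # the pattern used by the I3013 check

engine_regex.match('mysql')             # fine: a plain string value
engine_regex.match({'Ref': 'Engine'})   # TypeError: expected string or bytes-like object
```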
| [
{
"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RetentionPeriodOnResourceTypesWithAutoExpiringContent(CloudFormationLintRule):\n \"\"\"Check for RetentionPeriod \"\"\"\n id = 'I3013'\n shortdesc = 'Check resources with auto expiring content have explicit retention period'\n description = 'The behaviour for data retention is different across AWS Services.'\\\n 'If no retention period is specified the default for some services is to delete the data after a period of time.' \\\n 'This check requires you to explicitly set the retention period for those resources to avoid unexpected data losses'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['resources', 'retentionperiod']\n\n def _check_ref(self, value, parameters, resources, path): # pylint: disable=W0613\n print(value)\n\n def match(self, cfn):\n \"\"\"Check for RetentionPeriod\"\"\"\n matches = []\n\n retention_attributes_by_resource_type = {\n 'AWS::Kinesis::Stream': [\n {\n 'Attribute': 'RetentionPeriodHours',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours'\n }\n ],\n 'AWS::SQS::Queue': [\n {\n 'Attribute': 'MessageRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sqs-queues.html#aws-sqs-queue-msgretentionperiod'\n }\n ],\n 'AWS::DocDB::DBCluster': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-docdb-dbcluster.html#cfn-docdb-dbcluster-backupretentionperiod'\n }\n ],\n 'AWS::Synthetics::Canary': [\n {\n 'Attribute': 'SuccessRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-successretentionperiod'\n },\n {\n 'Attribute': 'FailureRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-failureretentionperiod'\n }\n ],\n 'AWS::Redshift::Cluster': [\n {\n 'Attribute': 'AutomatedSnapshotRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html#cfn-redshift-cluster-automatedsnapshotretentionperiod'\n }\n ],\n 'AWS::RDS::DBInstance': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-backupretentionperiod',\n 'CheckAttribute': 'Engine',\n 'CheckAttributeRegex': re.compile('^((?!aurora).)*$'),\n }\n ],\n 'AWS::RDS::DBCluster': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#cfn-rds-dbcluster-backuprententionperiod'\n }\n ]\n }\n\n resources = cfn.get_resources()\n for r_name, r_values in resources.items():\n if r_values.get('Type') in retention_attributes_by_resource_type:\n for attr_def in retention_attributes_by_resource_type[r_values.get('Type')]:\n property_sets = r_values.get_safe('Properties')\n for property_set, path in property_sets:\n error_path = ['Resources', r_name] + path\n if not property_set:\n message = f'The default retention period will delete the data 
after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in error_path)}'\n matches.append(RuleMatch(error_path, message))\n else:\n value = property_set.get(attr_def.get('Attribute'))\n if not value:\n message = f'The default retention period will delete the data after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in error_path)}'\n if attr_def.get('CheckAttribute'):\n if self._validate_property(property_set.get(attr_def.get('CheckAttribute')), attr_def.get('CheckAttributeRegex')):\n matches.append(RuleMatch(error_path, message))\n else:\n matches.append(RuleMatch(error_path, message))\n if isinstance(value, dict):\n # pylint: disable=protected-access\n refs = cfn._search_deep_keys(\n 'Ref', value, error_path + [attr_def.get('Attribute')])\n for ref in refs:\n if ref[-1] == 'AWS::NoValue':\n message = f'The default retention period will delete the data after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in ref[0:-1])}'\n matches.append(RuleMatch(ref[0:-1], message))\n\n return matches\n\n def _validate_property(self, value, regex) -> bool:\n if regex.match(value):\n return True\n return False\n",
"path": "src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py"
}
] | [
{
"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RetentionPeriodOnResourceTypesWithAutoExpiringContent(CloudFormationLintRule):\n \"\"\"Check for RetentionPeriod \"\"\"\n id = 'I3013'\n shortdesc = 'Check resources with auto expiring content have explicit retention period'\n description = 'The behaviour for data retention is different across AWS Services.'\\\n 'If no retention period is specified the default for some services is to delete the data after a period of time.' \\\n 'This check requires you to explicitly set the retention period for those resources to avoid unexpected data losses'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['resources', 'retentionperiod']\n\n def _check_ref(self, value, parameters, resources, path): # pylint: disable=W0613\n print(value)\n\n def match(self, cfn):\n \"\"\"Check for RetentionPeriod\"\"\"\n matches = []\n\n retention_attributes_by_resource_type = {\n 'AWS::Kinesis::Stream': [\n {\n 'Attribute': 'RetentionPeriodHours',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours'\n }\n ],\n 'AWS::SQS::Queue': [\n {\n 'Attribute': 'MessageRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sqs-queues.html#aws-sqs-queue-msgretentionperiod'\n }\n ],\n 'AWS::DocDB::DBCluster': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-docdb-dbcluster.html#cfn-docdb-dbcluster-backupretentionperiod'\n }\n ],\n 'AWS::Synthetics::Canary': [\n {\n 'Attribute': 'SuccessRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-successretentionperiod'\n },\n {\n 'Attribute': 'FailureRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-synthetics-canary.html#cfn-synthetics-canary-failureretentionperiod'\n }\n ],\n 'AWS::Redshift::Cluster': [\n {\n 'Attribute': 'AutomatedSnapshotRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html#cfn-redshift-cluster-automatedsnapshotretentionperiod'\n }\n ],\n 'AWS::RDS::DBInstance': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-backupretentionperiod',\n 'CheckAttribute': 'Engine',\n 'CheckAttributeRegex': re.compile('^((?!aurora).)*$'),\n }\n ],\n 'AWS::RDS::DBCluster': [\n {\n 'Attribute': 'BackupRetentionPeriod',\n 'SourceUrl': 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#cfn-rds-dbcluster-backuprententionperiod'\n }\n ]\n }\n\n resources = cfn.get_resources()\n for r_name, r_values in resources.items():\n if r_values.get('Type') in retention_attributes_by_resource_type:\n for attr_def in retention_attributes_by_resource_type[r_values.get('Type')]:\n property_sets = r_values.get_safe('Properties')\n for property_set, path in property_sets:\n error_path = ['Resources', r_name] + path\n if not property_set:\n message = f'The default retention period will delete the data 
after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in error_path)}'\n matches.append(RuleMatch(error_path, message))\n else:\n value = property_set.get(attr_def.get('Attribute'))\n if not value:\n message = f'The default retention period will delete the data after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in error_path)}'\n if attr_def.get('CheckAttribute'):\n if self._validate_property(property_set.get(attr_def.get('CheckAttribute')), attr_def.get('CheckAttributeRegex')):\n matches.append(RuleMatch(error_path, message))\n else:\n matches.append(RuleMatch(error_path, message))\n if isinstance(value, dict):\n # pylint: disable=protected-access\n refs = cfn._search_deep_keys(\n 'Ref', value, error_path + [attr_def.get('Attribute')])\n for ref in refs:\n if ref[-1] == 'AWS::NoValue':\n message = f'The default retention period will delete the data after a pre-defined time. Set an explicit values to avoid data loss on resource : {\"/\".join(str(x) for x in ref[0:-1])}'\n matches.append(RuleMatch(ref[0:-1], message))\n\n return matches\n\n def _validate_property(self, value, regex) -> bool:\n if isinstance(value, str):\n if regex.match(value):\n return True\n return False\n return True\n",
"path": "src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py"
}
] | diff --git a/src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py b/src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py
index 338966f2b8..3cb3017f0b 100644
--- a/src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py
+++ b/src/cfnlint/rules/resources/RetentionPeriodOnResourceTypesWithAutoExpiringContent.py
@@ -106,6 +106,8 @@ def match(self, cfn):
return matches
def _validate_property(self, value, regex) -> bool:
- if regex.match(value):
- return True
- return False
+ if isinstance(value, str):
+ if regex.match(value):
+ return True
+ return False
+ return True
|
electricitymaps__electricitymaps-contrib-1599 | GB-NIR invalid data in database
The latest observation seems to be problematic (screenshot omitted); therefore it is not shown on the map. However, it is inserted in the database.
We should add proper validations to make sure coal/gas are present and that load > 0
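A minimal sketch of the kind of check meant here (a hypothetical standalone helper, not the project's existing `validation` module): reject a datapoint when a required fuel is missing or when total production is essentially zero.

```py
def is_plausible(datapoint, required=('coal', 'gas'), floor=1.0):
    """Return True only if all required keys are present and total production exceeds the floor (MW)."""
    production = datapoint.get('production', {})
    if any(production.get(key) is None for key in required):
        return False
    total = sum(v for v in production.values() if v is not None)
    return total > floor

# The kind of datapoint that caused this issue would be rejected:
bad = {'production': {'coal': None, 'gas': None, 'oil': 0.0, 'wind': 0.0}}
print(is_plausible(bad))  # False
```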
| [
{
"content": "#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom io import StringIO\nfrom operator import itemgetter\n\nimport logging\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser, tz\n\nfrom .lib.validation import validate\n\nthermal_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx'\nwind_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx'\nexchange_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx'\n# Positive values represent imports to Northern Ireland.\n# Negative value represent exports from Northern Ireland.\n\n\ndef get_data(url, session=None):\n \"\"\"\n Requests data from a specified url in CSV format.\n Returns a response.text object.\n \"\"\"\n\n s = session or requests.Session()\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\n }\n\n pagereq = requests.get(url, headers=headers)\n soup = BeautifulSoup(pagereq.text, 'html.parser')\n\n # Find and define parameters needed to send a POST request for the actual data.\n viewstategenerator = soup.find(\"input\", attrs={'id': '__VIEWSTATEGENERATOR'})['value']\n viewstate = soup.find(\"input\", attrs={'id': '__VIEWSTATE'})['value']\n eventvalidation = soup.find(\"input\", attrs={'id': '__EVENTVALIDATION'})['value']\n\n # Set date for post request.\n current_date = datetime.now().date()\n month = current_date.month\n day = current_date.day\n year = current_date.year\n\n FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n btnDownloadCSV = 'Download+CSV'\n ig_def_dp_cal_clientState = '|0|15,2017,09,2017,%s,%s||[[null,[],null],[{%s},[]],\"11,2017,09,2017,%s,%s\"]' % (month, day, '', month, day)\n IG_CSS_LINKS_ = 'ig_res/default/ig_monthcalendar.css|ig_res/default/ig_texteditor.css|ig_res/default/ig_shared.css'\n\n postdata = {'__VIEWSTATE': viewstate,\n '__VIEWSTATEGENERATOR': viewstategenerator,\n '__EVENTVALIDATION': eventvalidation,\n 'FromDatePicker_clientState': FromDatePicker_clientState,\n 'ToDatePicker_clientState': ToDatePicker_clientState,\n 'btnDownloadCSV': btnDownloadCSV,\n '_ig_def_dp_cal_clientState': ig_def_dp_cal_clientState,\n '_IG_CSS_LINKS_': IG_CSS_LINKS_\n }\n\n postheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n datareq = s.post(url, headers=postheaders, data=postdata)\n\n return datareq.text\n\n\ndef add_default_tz(timestamp):\n \"\"\"\n Adds Northern Ireland timezone to datetime object if tz = None.\n \"\"\"\n\n NIR = tz.gettz('Europe/Belfast')\n modified_timestamp = timestamp.replace(tzinfo=timestamp.tzinfo or NIR)\n\n return modified_timestamp\n\n\ndef create_thermal_df(text_data):\n \"\"\"\n Turns thermal csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1, 2, 3, 4, 5]\n df_thermal = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_thermal.fillna(0.0, inplace=True)\n\n return df_thermal\n\n\ndef create_wind_df(text_data):\n \"\"\"\n Turns wind csv data into a usable 
dataframe.\n \"\"\"\n\n cols_to_use = [0, 1]\n df_wind = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_wind.fillna(0.0, inplace=True)\n\n return df_wind\n\n\ndef create_exchange_df(text_data):\n \"\"\"\n Turns exchange csv data into a usable dataframe.\n \"\"\"\n\n df_exchange = pd.read_csv(StringIO(text_data))\n df_exchange.fillna(0.0, inplace=True)\n\n return df_exchange\n\n\ndef thermal_processor(df):\n \"\"\"\n Creates quarter hour datapoints for thermal production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['gas'] = row['Gas_MW']\n snapshot['coal'] = row['Coal_MW']\n snapshot['oil'] = row['Distillate_MW'] + row['Diesel_MW']\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef wind_processor(df):\n \"\"\"\n Creates quarter hour datapoints for wind production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['wind'] = row['Total_Wind_Generated_MW']\n if snapshot['wind'] > -20:\n snapshot['wind'] = max(snapshot['wind'], 0)\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef moyle_processor(df):\n \"\"\"\n Creates quarter hour datapoints for GB exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n snapshot['netFlow'] = row['Total_Moyle_Load_MW']\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB->GB-NIR'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef IE_processor(df):\n \"\"\"\n Creates quarter hour datapoints for IE exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n netFlow = (row['Total_Str_Let_Load_MW'] +\n row['Total_Enn_Cor_Load_MW'] +\n row['Total_Tan_Lou_Load_MW'])\n snapshot['netFlow'] = -1 * (netFlow)\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB-NIR->IE'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef merge_production(thermal_data, wind_data):\n \"\"\"\n Joins thermal and wind production data on shared datetime key.\n Returns a list.\n \"\"\"\n\n total_production = thermal_data + wind_data\n\n # Join thermal and wind dicts on 'datetime' key.\n d = defaultdict(dict)\n for elem in total_production:\n d[elem['datetime']].update(elem)\n\n joined_data = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n for datapoint in joined_data:\n datapoint['datetime'] = add_default_tz(parser.parse(datapoint['datetime'], dayfirst=True))\n\n return joined_data\n\n\ndef fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if 
target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n thermal_data = get_data(thermal_url)\n wind_data = get_data(wind_url)\n thermal_df = create_thermal_df(thermal_data)\n wind_df = create_wind_df(wind_data)\n thermal = thermal_processor(thermal_df)\n wind = wind_processor(wind_df)\n merge = merge_production(thermal, wind)\n\n production_mix_by_quarter_hour = []\n\n for datapoint in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': datapoint.get('datetime', 0.0),\n 'production': {\n 'coal': datapoint.get('coal', 0.0),\n 'gas': datapoint.get('gas', 0.0),\n 'oil': datapoint.get('oil', 0.0),\n 'solar': None,\n 'wind': datapoint.get('wind', 0.0)\n },\n 'source': 'soni.ltd.uk'\n }\n production_mix_by_quarter_hour.append(\n validate(production_mix, logger=logger, required=['gas', 'coal']))\n\n return production_mix_by_quarter_hour\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n exchange_data = get_data(exchange_url)\n exchange_dataframe = create_exchange_df(exchange_data)\n if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':\n moyle = moyle_processor(exchange_dataframe)\n return moyle\n elif '->'.join(sorted([zone_key1, zone_key2])) == 'GB-NIR->IE':\n IE = IE_processor(exchange_dataframe)\n return IE\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(GB-NIR, GB) ->')\n print(fetch_exchange('GB-NIR', 'GB'))\n print('fetch_exchange(GB-NIR, IE) ->')\n print(fetch_exchange('GB-NIR', 'IE'))\n",
"path": "parsers/GB_NIR.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom io import StringIO\nfrom operator import itemgetter\n\nimport logging\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser, tz\n\nfrom .lib.validation import validate\n\nthermal_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx'\nwind_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx'\nexchange_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx'\n# Positive values represent imports to Northern Ireland.\n# Negative value represent exports from Northern Ireland.\n\n\ndef get_data(url, session=None):\n \"\"\"\n Requests data from a specified url in CSV format.\n Returns a response.text object.\n \"\"\"\n\n s = session or requests.Session()\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\n }\n\n pagereq = requests.get(url, headers=headers)\n soup = BeautifulSoup(pagereq.text, 'html.parser')\n\n # Find and define parameters needed to send a POST request for the actual data.\n viewstategenerator = soup.find(\"input\", attrs={'id': '__VIEWSTATEGENERATOR'})['value']\n viewstate = soup.find(\"input\", attrs={'id': '__VIEWSTATE'})['value']\n eventvalidation = soup.find(\"input\", attrs={'id': '__EVENTVALIDATION'})['value']\n\n # Set date for post request.\n current_date = datetime.now().date()\n month = current_date.month\n day = current_date.day\n year = current_date.year\n\n FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n btnDownloadCSV = 'Download+CSV'\n ig_def_dp_cal_clientState = '|0|15,2017,09,2017,%s,%s||[[null,[],null],[{%s},[]],\"11,2017,09,2017,%s,%s\"]' % (month, day, '', month, day)\n IG_CSS_LINKS_ = 'ig_res/default/ig_monthcalendar.css|ig_res/default/ig_texteditor.css|ig_res/default/ig_shared.css'\n\n postdata = {'__VIEWSTATE': viewstate,\n '__VIEWSTATEGENERATOR': viewstategenerator,\n '__EVENTVALIDATION': eventvalidation,\n 'FromDatePicker_clientState': FromDatePicker_clientState,\n 'ToDatePicker_clientState': ToDatePicker_clientState,\n 'btnDownloadCSV': btnDownloadCSV,\n '_ig_def_dp_cal_clientState': ig_def_dp_cal_clientState,\n '_IG_CSS_LINKS_': IG_CSS_LINKS_\n }\n\n postheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n datareq = s.post(url, headers=postheaders, data=postdata)\n\n return datareq.text\n\n\ndef add_default_tz(timestamp):\n \"\"\"\n Adds Northern Ireland timezone to datetime object if tz = None.\n \"\"\"\n\n NIR = tz.gettz('Europe/Belfast')\n modified_timestamp = timestamp.replace(tzinfo=timestamp.tzinfo or NIR)\n\n return modified_timestamp\n\n\ndef create_thermal_df(text_data):\n \"\"\"\n Turns thermal csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1, 2, 3, 4, 5]\n df_thermal = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_thermal.fillna(0.0, inplace=True)\n\n return df_thermal\n\n\ndef create_wind_df(text_data):\n \"\"\"\n Turns wind csv data into a usable 
dataframe.\n \"\"\"\n\n cols_to_use = [0, 1]\n df_wind = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_wind.fillna(0.0, inplace=True)\n\n return df_wind\n\n\ndef create_exchange_df(text_data):\n \"\"\"\n Turns exchange csv data into a usable dataframe.\n \"\"\"\n\n df_exchange = pd.read_csv(StringIO(text_data))\n df_exchange.fillna(0.0, inplace=True)\n\n return df_exchange\n\n\ndef thermal_processor(df):\n \"\"\"\n Creates quarter hour datapoints for thermal production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['gas'] = row['Gas_MW']\n snapshot['coal'] = row['Coal_MW']\n snapshot['oil'] = row['Distillate_MW'] + row['Diesel_MW']\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef wind_processor(df):\n \"\"\"\n Creates quarter hour datapoints for wind production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['wind'] = row['Total_Wind_Generated_MW']\n if snapshot['wind'] > -20:\n snapshot['wind'] = max(snapshot['wind'], 0)\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef moyle_processor(df):\n \"\"\"\n Creates quarter hour datapoints for GB exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n snapshot['netFlow'] = row['Total_Moyle_Load_MW']\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB->GB-NIR'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef IE_processor(df):\n \"\"\"\n Creates quarter hour datapoints for IE exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n netFlow = (row['Total_Str_Let_Load_MW'] +\n row['Total_Enn_Cor_Load_MW'] +\n row['Total_Tan_Lou_Load_MW'])\n snapshot['netFlow'] = -1 * (netFlow)\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB-NIR->IE'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef merge_production(thermal_data, wind_data):\n \"\"\"\n Joins thermal and wind production data on shared datetime key.\n Returns a list.\n \"\"\"\n\n total_production = thermal_data + wind_data\n\n # Join thermal and wind dicts on 'datetime' key.\n d = defaultdict(dict)\n for elem in total_production:\n d[elem['datetime']].update(elem)\n\n joined_data = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n for datapoint in joined_data:\n datapoint['datetime'] = add_default_tz(parser.parse(datapoint['datetime'], dayfirst=True))\n\n return joined_data\n\n\ndef fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if 
target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n thermal_data = get_data(thermal_url)\n wind_data = get_data(wind_url)\n thermal_df = create_thermal_df(thermal_data)\n wind_df = create_wind_df(wind_data)\n thermal = thermal_processor(thermal_df)\n wind = wind_processor(wind_df)\n merge = merge_production(thermal, wind)\n\n production_mix_by_quarter_hour = []\n\n for datapoint in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': datapoint.get('datetime', 0.0),\n 'production': {\n 'coal': datapoint.get('coal', 0.0),\n 'gas': datapoint.get('gas', 0.0),\n 'oil': datapoint.get('oil', 0.0),\n 'solar': None,\n 'wind': datapoint.get('wind', 0.0)\n },\n 'source': 'soni.ltd.uk'\n }\n production_mix_by_quarter_hour.append(\n validate(production_mix, logger=logger, required=['gas', 'coal'], floor=1.0))\n\n return production_mix_by_quarter_hour\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n exchange_data = get_data(exchange_url)\n exchange_dataframe = create_exchange_df(exchange_data)\n if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':\n moyle = moyle_processor(exchange_dataframe)\n return moyle\n elif '->'.join(sorted([zone_key1, zone_key2])) == 'GB-NIR->IE':\n IE = IE_processor(exchange_dataframe)\n return IE\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(GB-NIR, GB) ->')\n print(fetch_exchange('GB-NIR', 'GB'))\n print('fetch_exchange(GB-NIR, IE) ->')\n print(fetch_exchange('GB-NIR', 'IE'))\n",
"path": "parsers/GB_NIR.py"
}
] | diff --git a/parsers/GB_NIR.py b/parsers/GB_NIR.py
index 5379e28bbd..3db1b819c4 100644
--- a/parsers/GB_NIR.py
+++ b/parsers/GB_NIR.py
@@ -277,7 +277,7 @@ def fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,
'source': 'soni.ltd.uk'
}
production_mix_by_quarter_hour.append(
- validate(production_mix, logger=logger, required=['gas', 'coal']))
+ validate(production_mix, logger=logger, required=['gas', 'coal'], floor=1.0))
return production_mix_by_quarter_hour
|
docker__docker-py-753 | Slightly incorrect documentation for user parameter in create_container?
The documentation for the `user` parameter in `create_container` says:
`user (str or int): Username or UID`
However, supplying it as a Python int (`client.create_container(user=1000, ...)`) gives
`docker.errors.APIError: 500 Server Error: Internal Server Error ("json: cannot unmarshal number into Go value of type string")`
Supplying it as a string works; `client.create_container(user="1000", ...)` runs fine.
I guess it is a minor issue; it only caused real trouble because Celery printed the exception as "500 Server Error: Internal Server Error", so the critical piece of information (`json: cannot unmarshal number into Go value of type string`) was missing. Still, I figured I should report it.
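A minimal sketch of the obvious client-side workaround (a hypothetical helper, not part of docker-py's API): coerce an integer `user` to a string before it is serialized, since the daemon unmarshals `User` into a Go string.

```py
def normalize_user(user):
    """Accept a username (str) or a UID (int) and return the string the API expects."""
    if isinstance(user, int):
        return str(user)   # 1000 -> "1000", avoids the unmarshal error
    return user

print(normalize_user(1000))   # "1000"
print(normalize_user('web'))  # "web"
```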
| [
{
"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nimport os.path\nimport json\nimport shlex\nimport tarfile\nimport tempfile\nimport warnings\nfrom distutils.version import StrictVersion\nfrom fnmatch import fnmatch\nfrom datetime import datetime\n\nimport requests\nimport six\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import tls\nfrom .types import Ulimit, LogConfig\n\n\nDEFAULT_HTTP_HOST = \"127.0.0.1\"\nDEFAULT_UNIX_SOCKET = \"http+unix://var/run/docker.sock\"\nBYTE_UNITS = {\n 'b': 1,\n 'k': 1024,\n 'm': 1024 * 1024,\n 'g': 1024 * 1024 * 1024\n}\n\n\ndef mkbuildcontext(dockerfile):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n if isinstance(dockerfile, io.StringIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n if six.PY3:\n raise TypeError('Please use io.BytesIO to create in-memory '\n 'Dockerfiles with Python 3')\n else:\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n elif isinstance(dockerfile, io.BytesIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n else:\n dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')\n t.addfile(dfinfo, dockerfile)\n t.close()\n f.seek(0)\n return f\n\n\ndef tar(path, exclude=None, dockerfile=None):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n\n root = os.path.abspath(path)\n exclude = exclude or []\n\n for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):\n t.add(os.path.join(root, path), arcname=path, recursive=False)\n\n t.close()\n f.seek(0)\n return f\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n exceptions = [p for p in patterns if p.startswith('!')]\n\n include_patterns = [p[1:] for p in exceptions]\n include_patterns += [dockerfile, '.dockerignore']\n\n exclude_patterns = list(set(patterns) - set(exceptions))\n\n all_paths = get_paths(root)\n\n # Remove all paths that are matched by any exclusion pattern\n paths = [\n p for p in all_paths\n if not any(match_path(p, pattern) for pattern in exclude_patterns)\n ]\n\n # Add back the set of paths that are matched by any inclusion pattern.\n # Include parent dirs - if we add back 'foo/bar', add 'foo' as well\n for p in all_paths:\n if any(match_path(p, pattern) for pattern in include_patterns):\n components = p.split('/')\n paths += [\n '/'.join(components[:end])\n for end in range(1, len(components) + 1)\n ]\n\n return set(paths)\n\n\ndef get_paths(root):\n paths = []\n\n for parent, dirs, files in os.walk(root, followlinks=False):\n parent = os.path.relpath(parent, root)\n if parent == '.':\n parent = ''\n for path in 
dirs:\n paths.append(os.path.join(parent, path))\n for path in files:\n paths.append(os.path.join(parent, path))\n\n return paths\n\n\ndef match_path(path, pattern):\n pattern = pattern.rstrip('/')\n pattern_components = pattern.split('/')\n path_components = path.split('/')[:len(pattern_components)]\n return fnmatch('/'.join(path_components), pattern)\n\n\ndef compare_version(v1, v2):\n \"\"\"Compare docker versions\n\n >>> v1 = '1.9'\n >>> v2 = '1.10'\n >>> compare_version(v1, v2)\n 1\n >>> compare_version(v2, v1)\n -1\n >>> compare_version(v2, v2)\n 0\n \"\"\"\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1\n\n\ndef ping_registry(url):\n warnings.warn(\n 'The `ping_registry` method is deprecated and will be removed.',\n DeprecationWarning\n )\n\n return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')\n\n\ndef ping(url, valid_4xx_statuses=None):\n try:\n res = requests.get(url, timeout=3)\n except Exception:\n return False\n else:\n # We don't send yet auth headers\n # and a v2 registry will respond with status 401\n return (\n res.status_code < 400 or\n (valid_4xx_statuses and res.status_code in valid_4xx_statuses)\n )\n\n\ndef _convert_port_binding(binding):\n result = {'HostIp': '', 'HostPort': ''}\n if isinstance(binding, tuple):\n if len(binding) == 2:\n result['HostPort'] = binding[1]\n result['HostIp'] = binding[0]\n elif isinstance(binding[0], six.string_types):\n result['HostIp'] = binding[0]\n else:\n result['HostPort'] = binding[0]\n elif isinstance(binding, dict):\n if 'HostPort' in binding:\n result['HostPort'] = binding['HostPort']\n if 'HostIp' in binding:\n result['HostIp'] = binding['HostIp']\n else:\n raise ValueError(binding)\n else:\n result['HostPort'] = binding\n\n if result['HostPort'] is None:\n result['HostPort'] = ''\n else:\n result['HostPort'] = str(result['HostPort'])\n\n return result\n\n\ndef convert_port_bindings(port_bindings):\n result = {}\n for k, v in six.iteritems(port_bindings):\n key = str(k)\n if '/' not in key:\n key = key + '/tcp'\n if isinstance(v, list):\n result[key] = [_convert_port_binding(binding) for binding in v]\n else:\n result[key] = [_convert_port_binding(v)]\n return result\n\n\ndef convert_volume_binds(binds):\n if isinstance(binds, list):\n return binds\n\n result = []\n for k, v in binds.items():\n if isinstance(v, dict):\n if 'ro' in v and 'mode' in v:\n raise ValueError(\n 'Binding cannot contain both \"ro\" and \"mode\": {}'\n .format(repr(v))\n )\n\n if 'ro' in v:\n mode = 'ro' if v['ro'] else 'rw'\n elif 'mode' in v:\n mode = v['mode']\n else:\n mode = 'rw'\n\n result.append('{0}:{1}:{2}'.format(\n k, v['bind'], mode\n ))\n else:\n result.append('{0}:{1}:rw'.format(k, v))\n return result\n\n\ndef parse_repository_tag(repo):\n column_index = repo.rfind(':')\n if column_index < 0:\n return repo, None\n tag = repo[column_index + 1:]\n slash_index = tag.find('/')\n if slash_index < 0:\n return repo[:column_index], tag\n\n return repo, None\n\n\n# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh\n# fd:// protocol unsupported (for obvious reasons)\n# Added support for http and https\n# Protocol translation: tcp -> http, unix -> http+unix\ndef parse_host(addr):\n proto = \"http+unix\"\n host = DEFAULT_HTTP_HOST\n port = None\n path = ''\n if not addr or addr.strip() == 'unix://':\n return DEFAULT_UNIX_SOCKET\n\n addr = addr.strip()\n if addr.startswith('http://'):\n addr = addr.replace('http://', 'tcp://')\n if addr.startswith('http+unix://'):\n 
addr = addr.replace('http+unix://', 'unix://')\n\n if addr == 'tcp://':\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr))\n elif addr.startswith('unix://'):\n addr = addr[7:]\n elif addr.startswith('tcp://'):\n proto = \"http\"\n addr = addr[6:]\n elif addr.startswith('https://'):\n proto = \"https\"\n addr = addr[8:]\n elif addr.startswith('fd://'):\n raise errors.DockerException(\"fd protocol is not implemented\")\n else:\n if \"://\" in addr:\n raise errors.DockerException(\n \"Invalid bind address protocol: {0}\".format(addr)\n )\n proto = \"http\"\n\n if proto != \"http+unix\" and \":\" in addr:\n host_parts = addr.split(':')\n if len(host_parts) != 2:\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr)\n )\n if host_parts[0]:\n host = host_parts[0]\n\n port = host_parts[1]\n if '/' in port:\n port, path = port.split('/', 1)\n path = '/{0}'.format(path)\n try:\n port = int(port)\n except Exception:\n raise errors.DockerException(\n \"Invalid port: %s\", addr\n )\n\n elif proto in (\"http\", \"https\") and ':' not in addr:\n raise errors.DockerException(\n \"Bind address needs a port: {0}\".format(addr))\n else:\n host = addr\n\n if proto == \"http+unix\":\n return \"{0}://{1}\".format(proto, host)\n return \"{0}://{1}:{2}{3}\".format(proto, host, port, path)\n\n\ndef parse_devices(devices):\n device_list = []\n for device in devices:\n device_mapping = device.split(\":\")\n if device_mapping:\n path_on_host = device_mapping[0]\n if len(device_mapping) > 1:\n path_in_container = device_mapping[1]\n else:\n path_in_container = path_on_host\n if len(device_mapping) > 2:\n permissions = device_mapping[2]\n else:\n permissions = 'rwm'\n device_list.append({\"PathOnHost\": path_on_host,\n \"PathInContainer\": path_in_container,\n \"CgroupPermissions\": permissions})\n return device_list\n\n\ndef kwargs_from_env(ssl_version=None, assert_hostname=None):\n host = os.environ.get('DOCKER_HOST')\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n tls_verify = os.environ.get('DOCKER_TLS_VERIFY')\n\n params = {}\n\n if host:\n params['base_url'] = (host.replace('tcp://', 'https://')\n if tls_verify else host)\n\n if tls_verify and not cert_path:\n cert_path = os.path.join(os.path.expanduser('~'), '.docker')\n\n if tls_verify and cert_path:\n params['tls'] = tls.TLSConfig(\n client_cert=(os.path.join(cert_path, 'cert.pem'),\n os.path.join(cert_path, 'key.pem')),\n ca_cert=os.path.join(cert_path, 'ca.pem'),\n verify=True,\n ssl_version=ssl_version,\n assert_hostname=assert_hostname)\n\n return params\n\n\ndef convert_filters(filters):\n result = {}\n for k, v in six.iteritems(filters):\n if isinstance(v, bool):\n v = 'true' if v else 'false'\n if not isinstance(v, list):\n v = [v, ]\n result[k] = v\n return json.dumps(result)\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert a UTC datetime to a Unix timestamp\"\"\"\n delta = dt - datetime.utcfromtimestamp(0)\n return delta.seconds + delta.days * 24 * 3600\n\n\ndef parse_bytes(s):\n if len(s) == 0:\n s = 0\n else:\n if s[-2:-1].isalpha() and s[-1].isalpha():\n if (s[-1] == \"b\" or s[-1] == \"B\"):\n s = s[:-1]\n units = BYTE_UNITS\n suffix = s[-1].lower()\n\n # Check if the variable is a string representation of an int\n # without a units part. 
Assuming that the units are bytes.\n if suffix.isdigit():\n digits_part = s\n suffix = 'b'\n else:\n digits_part = s[:-1]\n\n if suffix in units.keys() or suffix.isdigit():\n try:\n digits = int(digits_part)\n except ValueError:\n message = ('Failed converting the string value for'\n 'memory ({0}) to a number.')\n formatted_message = message.format(digits_part)\n raise errors.DockerException(formatted_message)\n\n s = digits * units[suffix]\n else:\n message = ('The specified value for memory'\n ' ({0}) should specify the units. The postfix'\n ' should be one of the `b` `k` `m` `g`'\n ' characters')\n raise errors.DockerException(message.format(s))\n\n return s\n\n\ndef create_host_config(\n binds=None, port_bindings=None, lxc_conf=None,\n publish_all_ports=False, links=None, privileged=False,\n dns=None, dns_search=None, volumes_from=None, network_mode=None,\n restart_policy=None, cap_add=None, cap_drop=None, devices=None,\n extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,\n security_opt=None, ulimits=None, log_config=None, mem_limit=None,\n memswap_limit=None, cgroup_parent=None, version=None\n):\n host_config = {}\n\n if not version:\n warnings.warn(\n 'docker.utils.create_host_config() is deprecated. Please use '\n 'Client.create_host_config() instead.'\n )\n version = constants.DEFAULT_DOCKER_API_VERSION\n\n if mem_limit is not None:\n if isinstance(mem_limit, six.string_types):\n mem_limit = parse_bytes(mem_limit)\n host_config['Memory'] = mem_limit\n\n if memswap_limit is not None:\n if isinstance(memswap_limit, six.string_types):\n memswap_limit = parse_bytes(memswap_limit)\n host_config['MemorySwap'] = memswap_limit\n\n if pid_mode not in (None, 'host'):\n raise errors.DockerException(\n 'Invalid value for pid param: {0}'.format(pid_mode)\n )\n elif pid_mode:\n host_config['PidMode'] = pid_mode\n\n if ipc_mode:\n host_config['IpcMode'] = ipc_mode\n\n if privileged:\n host_config['Privileged'] = privileged\n\n if publish_all_ports:\n host_config['PublishAllPorts'] = publish_all_ports\n\n if read_only is not None:\n host_config['ReadonlyRootfs'] = read_only\n\n if dns_search:\n host_config['DnsSearch'] = dns_search\n\n if network_mode:\n host_config['NetworkMode'] = network_mode\n elif network_mode is None and compare_version('1.19', version) > 0:\n host_config['NetworkMode'] = 'default'\n\n if restart_policy:\n host_config['RestartPolicy'] = restart_policy\n\n if cap_add:\n host_config['CapAdd'] = cap_add\n\n if cap_drop:\n host_config['CapDrop'] = cap_drop\n\n if devices:\n host_config['Devices'] = parse_devices(devices)\n\n if dns is not None:\n host_config['Dns'] = dns\n\n if security_opt is not None:\n if not isinstance(security_opt, list):\n raise errors.DockerException(\n 'Invalid type for security_opt param: expected list but found'\n ' {0}'.format(type(security_opt))\n )\n host_config['SecurityOpt'] = security_opt\n\n if volumes_from is not None:\n if isinstance(volumes_from, six.string_types):\n volumes_from = volumes_from.split(',')\n host_config['VolumesFrom'] = volumes_from\n\n if binds is not None:\n host_config['Binds'] = convert_volume_binds(binds)\n\n if port_bindings is not None:\n host_config['PortBindings'] = convert_port_bindings(\n port_bindings\n )\n\n if extra_hosts is not None:\n if isinstance(extra_hosts, dict):\n extra_hosts = [\n '{0}:{1}'.format(k, v)\n for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n host_config['ExtraHosts'] = extra_hosts\n\n if links is not None:\n if isinstance(links, dict):\n links = six.iteritems(links)\n\n 
formatted_links = [\n '{0}:{1}'.format(k, v) for k, v in sorted(links)\n ]\n\n host_config['Links'] = formatted_links\n\n if isinstance(lxc_conf, dict):\n formatted = []\n for k, v in six.iteritems(lxc_conf):\n formatted.append({'Key': k, 'Value': str(v)})\n lxc_conf = formatted\n\n if lxc_conf is not None:\n host_config['LxcConf'] = lxc_conf\n\n if cgroup_parent is not None:\n host_config['CgroupParent'] = cgroup_parent\n\n if ulimits is not None:\n if not isinstance(ulimits, list):\n raise errors.DockerException(\n 'Invalid type for ulimits param: expected list but found'\n ' {0}'.format(type(ulimits))\n )\n host_config['Ulimits'] = []\n for l in ulimits:\n if not isinstance(l, Ulimit):\n l = Ulimit(**l)\n host_config['Ulimits'].append(l)\n\n if log_config is not None:\n if not isinstance(log_config, LogConfig):\n if not isinstance(log_config, dict):\n raise errors.DockerException(\n 'Invalid type for log_config param: expected LogConfig but'\n ' found {0}'.format(type(log_config))\n )\n log_config = LogConfig(**log_config)\n host_config['LogConfig'] = log_config\n\n return host_config\n\n\ndef parse_env_file(env_file):\n \"\"\"\n Reads a line-separated environment file.\n The format of each line should be \"key=value\".\n \"\"\"\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n parse_line = line.strip().split('=')\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment\n\n\ndef create_container_config(\n version, image, command, hostname=None, user=None, detach=False,\n stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,\n dns=None, volumes=None, volumes_from=None, network_disabled=False,\n entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,\n memswap_limit=None, cpuset=None, host_config=None, mac_address=None,\n labels=None, volume_driver=None\n):\n if isinstance(command, six.string_types):\n command = shlex.split(str(command))\n\n if isinstance(entrypoint, six.string_types):\n entrypoint = shlex.split(str(entrypoint))\n\n if isinstance(environment, dict):\n environment = [\n six.text_type('{0}={1}').format(k, v)\n for k, v in six.iteritems(environment)\n ]\n\n if labels is not None and compare_version('1.18', version) < 0:\n raise errors.InvalidVersion(\n 'labels were only introduced in API version 1.18'\n )\n\n if compare_version('1.19', version) < 0:\n if volume_driver is not None:\n raise errors.InvalidVersion(\n 'Volume drivers were only introduced in API version 1.19'\n )\n mem_limit = mem_limit if mem_limit is not None else 0\n memswap_limit = memswap_limit if memswap_limit is not None else 0\n else:\n if mem_limit is not None:\n raise errors.InvalidVersion(\n 'mem_limit has been moved to host_config in API version 1.19'\n )\n\n if memswap_limit is not None:\n raise errors.InvalidVersion(\n 'memswap_limit has been moved to host_config in API '\n 'version 1.19'\n )\n\n if isinstance(labels, list):\n labels = dict((lbl, six.text_type('')) for lbl in labels)\n\n if isinstance(mem_limit, six.string_types):\n mem_limit = parse_bytes(mem_limit)\n if isinstance(memswap_limit, six.string_types):\n memswap_limit = parse_bytes(memswap_limit)\n\n if isinstance(ports, list):\n exposed_ports = {}\n for port_definition in ports:\n port = port_definition\n proto = 'tcp'\n if isinstance(port_definition, tuple):\n if len(port_definition) == 2:\n proto 
= port_definition[1]\n port = port_definition[0]\n exposed_ports['{0}/{1}'.format(port, proto)] = {}\n ports = exposed_ports\n\n if isinstance(volumes, six.string_types):\n volumes = [volumes, ]\n\n if isinstance(volumes, list):\n volumes_dict = {}\n for vol in volumes:\n volumes_dict[vol] = {}\n volumes = volumes_dict\n\n if volumes_from:\n if not isinstance(volumes_from, six.string_types):\n volumes_from = ','.join(volumes_from)\n else:\n # Force None, an empty list or dict causes client.start to fail\n volumes_from = None\n\n attach_stdin = False\n attach_stdout = False\n attach_stderr = False\n stdin_once = False\n\n if not detach:\n attach_stdout = True\n attach_stderr = True\n\n if stdin_open:\n attach_stdin = True\n stdin_once = True\n\n if compare_version('1.10', version) >= 0:\n message = ('{0!r} parameter has no effect on create_container().'\n ' It has been moved to start()')\n if dns is not None:\n raise errors.InvalidVersion(message.format('dns'))\n if volumes_from is not None:\n raise errors.InvalidVersion(message.format('volumes_from'))\n\n return {\n 'Hostname': hostname,\n 'Domainname': domainname,\n 'ExposedPorts': ports,\n 'User': user,\n 'Tty': tty,\n 'OpenStdin': stdin_open,\n 'StdinOnce': stdin_once,\n 'Memory': mem_limit,\n 'AttachStdin': attach_stdin,\n 'AttachStdout': attach_stdout,\n 'AttachStderr': attach_stderr,\n 'Env': environment,\n 'Cmd': command,\n 'Dns': dns,\n 'Image': image,\n 'Volumes': volumes,\n 'VolumesFrom': volumes_from,\n 'NetworkDisabled': network_disabled,\n 'Entrypoint': entrypoint,\n 'CpuShares': cpu_shares,\n 'Cpuset': cpuset,\n 'CpusetCpus': cpuset,\n 'WorkingDir': working_dir,\n 'MemorySwap': memswap_limit,\n 'HostConfig': host_config,\n 'MacAddress': mac_address,\n 'Labels': labels,\n 'VolumeDriver': volume_driver,\n }\n",
"path": "docker/utils/utils.py"
}
] | [
{
"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nimport os.path\nimport json\nimport shlex\nimport tarfile\nimport tempfile\nimport warnings\nfrom distutils.version import StrictVersion\nfrom fnmatch import fnmatch\nfrom datetime import datetime\n\nimport requests\nimport six\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import tls\nfrom .types import Ulimit, LogConfig\n\n\nDEFAULT_HTTP_HOST = \"127.0.0.1\"\nDEFAULT_UNIX_SOCKET = \"http+unix://var/run/docker.sock\"\nBYTE_UNITS = {\n 'b': 1,\n 'k': 1024,\n 'm': 1024 * 1024,\n 'g': 1024 * 1024 * 1024\n}\n\n\ndef mkbuildcontext(dockerfile):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n if isinstance(dockerfile, io.StringIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n if six.PY3:\n raise TypeError('Please use io.BytesIO to create in-memory '\n 'Dockerfiles with Python 3')\n else:\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n elif isinstance(dockerfile, io.BytesIO):\n dfinfo = tarfile.TarInfo('Dockerfile')\n dfinfo.size = len(dockerfile.getvalue())\n dockerfile.seek(0)\n else:\n dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')\n t.addfile(dfinfo, dockerfile)\n t.close()\n f.seek(0)\n return f\n\n\ndef tar(path, exclude=None, dockerfile=None):\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode='w', fileobj=f)\n\n root = os.path.abspath(path)\n exclude = exclude or []\n\n for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):\n t.add(os.path.join(root, path), arcname=path, recursive=False)\n\n t.close()\n f.seek(0)\n return f\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n exceptions = [p for p in patterns if p.startswith('!')]\n\n include_patterns = [p[1:] for p in exceptions]\n include_patterns += [dockerfile, '.dockerignore']\n\n exclude_patterns = list(set(patterns) - set(exceptions))\n\n all_paths = get_paths(root)\n\n # Remove all paths that are matched by any exclusion pattern\n paths = [\n p for p in all_paths\n if not any(match_path(p, pattern) for pattern in exclude_patterns)\n ]\n\n # Add back the set of paths that are matched by any inclusion pattern.\n # Include parent dirs - if we add back 'foo/bar', add 'foo' as well\n for p in all_paths:\n if any(match_path(p, pattern) for pattern in include_patterns):\n components = p.split('/')\n paths += [\n '/'.join(components[:end])\n for end in range(1, len(components) + 1)\n ]\n\n return set(paths)\n\n\ndef get_paths(root):\n paths = []\n\n for parent, dirs, files in os.walk(root, followlinks=False):\n parent = os.path.relpath(parent, root)\n if parent == '.':\n parent = ''\n for path in 
dirs:\n paths.append(os.path.join(parent, path))\n for path in files:\n paths.append(os.path.join(parent, path))\n\n return paths\n\n\ndef match_path(path, pattern):\n pattern = pattern.rstrip('/')\n pattern_components = pattern.split('/')\n path_components = path.split('/')[:len(pattern_components)]\n return fnmatch('/'.join(path_components), pattern)\n\n\ndef compare_version(v1, v2):\n \"\"\"Compare docker versions\n\n >>> v1 = '1.9'\n >>> v2 = '1.10'\n >>> compare_version(v1, v2)\n 1\n >>> compare_version(v2, v1)\n -1\n >>> compare_version(v2, v2)\n 0\n \"\"\"\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1\n\n\ndef ping_registry(url):\n warnings.warn(\n 'The `ping_registry` method is deprecated and will be removed.',\n DeprecationWarning\n )\n\n return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')\n\n\ndef ping(url, valid_4xx_statuses=None):\n try:\n res = requests.get(url, timeout=3)\n except Exception:\n return False\n else:\n # We don't send yet auth headers\n # and a v2 registry will respond with status 401\n return (\n res.status_code < 400 or\n (valid_4xx_statuses and res.status_code in valid_4xx_statuses)\n )\n\n\ndef _convert_port_binding(binding):\n result = {'HostIp': '', 'HostPort': ''}\n if isinstance(binding, tuple):\n if len(binding) == 2:\n result['HostPort'] = binding[1]\n result['HostIp'] = binding[0]\n elif isinstance(binding[0], six.string_types):\n result['HostIp'] = binding[0]\n else:\n result['HostPort'] = binding[0]\n elif isinstance(binding, dict):\n if 'HostPort' in binding:\n result['HostPort'] = binding['HostPort']\n if 'HostIp' in binding:\n result['HostIp'] = binding['HostIp']\n else:\n raise ValueError(binding)\n else:\n result['HostPort'] = binding\n\n if result['HostPort'] is None:\n result['HostPort'] = ''\n else:\n result['HostPort'] = str(result['HostPort'])\n\n return result\n\n\ndef convert_port_bindings(port_bindings):\n result = {}\n for k, v in six.iteritems(port_bindings):\n key = str(k)\n if '/' not in key:\n key = key + '/tcp'\n if isinstance(v, list):\n result[key] = [_convert_port_binding(binding) for binding in v]\n else:\n result[key] = [_convert_port_binding(v)]\n return result\n\n\ndef convert_volume_binds(binds):\n if isinstance(binds, list):\n return binds\n\n result = []\n for k, v in binds.items():\n if isinstance(v, dict):\n if 'ro' in v and 'mode' in v:\n raise ValueError(\n 'Binding cannot contain both \"ro\" and \"mode\": {}'\n .format(repr(v))\n )\n\n if 'ro' in v:\n mode = 'ro' if v['ro'] else 'rw'\n elif 'mode' in v:\n mode = v['mode']\n else:\n mode = 'rw'\n\n result.append('{0}:{1}:{2}'.format(\n k, v['bind'], mode\n ))\n else:\n result.append('{0}:{1}:rw'.format(k, v))\n return result\n\n\ndef parse_repository_tag(repo):\n column_index = repo.rfind(':')\n if column_index < 0:\n return repo, None\n tag = repo[column_index + 1:]\n slash_index = tag.find('/')\n if slash_index < 0:\n return repo[:column_index], tag\n\n return repo, None\n\n\n# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh\n# fd:// protocol unsupported (for obvious reasons)\n# Added support for http and https\n# Protocol translation: tcp -> http, unix -> http+unix\ndef parse_host(addr):\n proto = \"http+unix\"\n host = DEFAULT_HTTP_HOST\n port = None\n path = ''\n if not addr or addr.strip() == 'unix://':\n return DEFAULT_UNIX_SOCKET\n\n addr = addr.strip()\n if addr.startswith('http://'):\n addr = addr.replace('http://', 'tcp://')\n if addr.startswith('http+unix://'):\n 
addr = addr.replace('http+unix://', 'unix://')\n\n if addr == 'tcp://':\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr))\n elif addr.startswith('unix://'):\n addr = addr[7:]\n elif addr.startswith('tcp://'):\n proto = \"http\"\n addr = addr[6:]\n elif addr.startswith('https://'):\n proto = \"https\"\n addr = addr[8:]\n elif addr.startswith('fd://'):\n raise errors.DockerException(\"fd protocol is not implemented\")\n else:\n if \"://\" in addr:\n raise errors.DockerException(\n \"Invalid bind address protocol: {0}\".format(addr)\n )\n proto = \"http\"\n\n if proto != \"http+unix\" and \":\" in addr:\n host_parts = addr.split(':')\n if len(host_parts) != 2:\n raise errors.DockerException(\n \"Invalid bind address format: {0}\".format(addr)\n )\n if host_parts[0]:\n host = host_parts[0]\n\n port = host_parts[1]\n if '/' in port:\n port, path = port.split('/', 1)\n path = '/{0}'.format(path)\n try:\n port = int(port)\n except Exception:\n raise errors.DockerException(\n \"Invalid port: %s\", addr\n )\n\n elif proto in (\"http\", \"https\") and ':' not in addr:\n raise errors.DockerException(\n \"Bind address needs a port: {0}\".format(addr))\n else:\n host = addr\n\n if proto == \"http+unix\":\n return \"{0}://{1}\".format(proto, host)\n return \"{0}://{1}:{2}{3}\".format(proto, host, port, path)\n\n\ndef parse_devices(devices):\n device_list = []\n for device in devices:\n device_mapping = device.split(\":\")\n if device_mapping:\n path_on_host = device_mapping[0]\n if len(device_mapping) > 1:\n path_in_container = device_mapping[1]\n else:\n path_in_container = path_on_host\n if len(device_mapping) > 2:\n permissions = device_mapping[2]\n else:\n permissions = 'rwm'\n device_list.append({\"PathOnHost\": path_on_host,\n \"PathInContainer\": path_in_container,\n \"CgroupPermissions\": permissions})\n return device_list\n\n\ndef kwargs_from_env(ssl_version=None, assert_hostname=None):\n host = os.environ.get('DOCKER_HOST')\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n tls_verify = os.environ.get('DOCKER_TLS_VERIFY')\n\n params = {}\n\n if host:\n params['base_url'] = (host.replace('tcp://', 'https://')\n if tls_verify else host)\n\n if tls_verify and not cert_path:\n cert_path = os.path.join(os.path.expanduser('~'), '.docker')\n\n if tls_verify and cert_path:\n params['tls'] = tls.TLSConfig(\n client_cert=(os.path.join(cert_path, 'cert.pem'),\n os.path.join(cert_path, 'key.pem')),\n ca_cert=os.path.join(cert_path, 'ca.pem'),\n verify=True,\n ssl_version=ssl_version,\n assert_hostname=assert_hostname)\n\n return params\n\n\ndef convert_filters(filters):\n result = {}\n for k, v in six.iteritems(filters):\n if isinstance(v, bool):\n v = 'true' if v else 'false'\n if not isinstance(v, list):\n v = [v, ]\n result[k] = v\n return json.dumps(result)\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert a UTC datetime to a Unix timestamp\"\"\"\n delta = dt - datetime.utcfromtimestamp(0)\n return delta.seconds + delta.days * 24 * 3600\n\n\ndef parse_bytes(s):\n if len(s) == 0:\n s = 0\n else:\n if s[-2:-1].isalpha() and s[-1].isalpha():\n if (s[-1] == \"b\" or s[-1] == \"B\"):\n s = s[:-1]\n units = BYTE_UNITS\n suffix = s[-1].lower()\n\n # Check if the variable is a string representation of an int\n # without a units part. 
Assuming that the units are bytes.\n if suffix.isdigit():\n digits_part = s\n suffix = 'b'\n else:\n digits_part = s[:-1]\n\n if suffix in units.keys() or suffix.isdigit():\n try:\n digits = int(digits_part)\n except ValueError:\n message = ('Failed converting the string value for'\n 'memory ({0}) to a number.')\n formatted_message = message.format(digits_part)\n raise errors.DockerException(formatted_message)\n\n s = digits * units[suffix]\n else:\n message = ('The specified value for memory'\n ' ({0}) should specify the units. The postfix'\n ' should be one of the `b` `k` `m` `g`'\n ' characters')\n raise errors.DockerException(message.format(s))\n\n return s\n\n\ndef create_host_config(\n binds=None, port_bindings=None, lxc_conf=None,\n publish_all_ports=False, links=None, privileged=False,\n dns=None, dns_search=None, volumes_from=None, network_mode=None,\n restart_policy=None, cap_add=None, cap_drop=None, devices=None,\n extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,\n security_opt=None, ulimits=None, log_config=None, mem_limit=None,\n memswap_limit=None, cgroup_parent=None, version=None\n):\n host_config = {}\n\n if not version:\n warnings.warn(\n 'docker.utils.create_host_config() is deprecated. Please use '\n 'Client.create_host_config() instead.'\n )\n version = constants.DEFAULT_DOCKER_API_VERSION\n\n if mem_limit is not None:\n if isinstance(mem_limit, six.string_types):\n mem_limit = parse_bytes(mem_limit)\n host_config['Memory'] = mem_limit\n\n if memswap_limit is not None:\n if isinstance(memswap_limit, six.string_types):\n memswap_limit = parse_bytes(memswap_limit)\n host_config['MemorySwap'] = memswap_limit\n\n if pid_mode not in (None, 'host'):\n raise errors.DockerException(\n 'Invalid value for pid param: {0}'.format(pid_mode)\n )\n elif pid_mode:\n host_config['PidMode'] = pid_mode\n\n if ipc_mode:\n host_config['IpcMode'] = ipc_mode\n\n if privileged:\n host_config['Privileged'] = privileged\n\n if publish_all_ports:\n host_config['PublishAllPorts'] = publish_all_ports\n\n if read_only is not None:\n host_config['ReadonlyRootfs'] = read_only\n\n if dns_search:\n host_config['DnsSearch'] = dns_search\n\n if network_mode:\n host_config['NetworkMode'] = network_mode\n elif network_mode is None and compare_version('1.19', version) > 0:\n host_config['NetworkMode'] = 'default'\n\n if restart_policy:\n host_config['RestartPolicy'] = restart_policy\n\n if cap_add:\n host_config['CapAdd'] = cap_add\n\n if cap_drop:\n host_config['CapDrop'] = cap_drop\n\n if devices:\n host_config['Devices'] = parse_devices(devices)\n\n if dns is not None:\n host_config['Dns'] = dns\n\n if security_opt is not None:\n if not isinstance(security_opt, list):\n raise errors.DockerException(\n 'Invalid type for security_opt param: expected list but found'\n ' {0}'.format(type(security_opt))\n )\n host_config['SecurityOpt'] = security_opt\n\n if volumes_from is not None:\n if isinstance(volumes_from, six.string_types):\n volumes_from = volumes_from.split(',')\n host_config['VolumesFrom'] = volumes_from\n\n if binds is not None:\n host_config['Binds'] = convert_volume_binds(binds)\n\n if port_bindings is not None:\n host_config['PortBindings'] = convert_port_bindings(\n port_bindings\n )\n\n if extra_hosts is not None:\n if isinstance(extra_hosts, dict):\n extra_hosts = [\n '{0}:{1}'.format(k, v)\n for k, v in sorted(six.iteritems(extra_hosts))\n ]\n\n host_config['ExtraHosts'] = extra_hosts\n\n if links is not None:\n if isinstance(links, dict):\n links = six.iteritems(links)\n\n 
formatted_links = [\n '{0}:{1}'.format(k, v) for k, v in sorted(links)\n ]\n\n host_config['Links'] = formatted_links\n\n if isinstance(lxc_conf, dict):\n formatted = []\n for k, v in six.iteritems(lxc_conf):\n formatted.append({'Key': k, 'Value': str(v)})\n lxc_conf = formatted\n\n if lxc_conf is not None:\n host_config['LxcConf'] = lxc_conf\n\n if cgroup_parent is not None:\n host_config['CgroupParent'] = cgroup_parent\n\n if ulimits is not None:\n if not isinstance(ulimits, list):\n raise errors.DockerException(\n 'Invalid type for ulimits param: expected list but found'\n ' {0}'.format(type(ulimits))\n )\n host_config['Ulimits'] = []\n for l in ulimits:\n if not isinstance(l, Ulimit):\n l = Ulimit(**l)\n host_config['Ulimits'].append(l)\n\n if log_config is not None:\n if not isinstance(log_config, LogConfig):\n if not isinstance(log_config, dict):\n raise errors.DockerException(\n 'Invalid type for log_config param: expected LogConfig but'\n ' found {0}'.format(type(log_config))\n )\n log_config = LogConfig(**log_config)\n host_config['LogConfig'] = log_config\n\n return host_config\n\n\ndef parse_env_file(env_file):\n \"\"\"\n Reads a line-separated environment file.\n The format of each line should be \"key=value\".\n \"\"\"\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n parse_line = line.strip().split('=')\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment\n\n\ndef create_container_config(\n version, image, command, hostname=None, user=None, detach=False,\n stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,\n dns=None, volumes=None, volumes_from=None, network_disabled=False,\n entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,\n memswap_limit=None, cpuset=None, host_config=None, mac_address=None,\n labels=None, volume_driver=None\n):\n if isinstance(command, six.string_types):\n command = shlex.split(str(command))\n\n if isinstance(entrypoint, six.string_types):\n entrypoint = shlex.split(str(entrypoint))\n\n if isinstance(environment, dict):\n environment = [\n six.text_type('{0}={1}').format(k, v)\n for k, v in six.iteritems(environment)\n ]\n\n if labels is not None and compare_version('1.18', version) < 0:\n raise errors.InvalidVersion(\n 'labels were only introduced in API version 1.18'\n )\n\n if compare_version('1.19', version) < 0:\n if volume_driver is not None:\n raise errors.InvalidVersion(\n 'Volume drivers were only introduced in API version 1.19'\n )\n mem_limit = mem_limit if mem_limit is not None else 0\n memswap_limit = memswap_limit if memswap_limit is not None else 0\n else:\n if mem_limit is not None:\n raise errors.InvalidVersion(\n 'mem_limit has been moved to host_config in API version 1.19'\n )\n\n if memswap_limit is not None:\n raise errors.InvalidVersion(\n 'memswap_limit has been moved to host_config in API '\n 'version 1.19'\n )\n\n if isinstance(labels, list):\n labels = dict((lbl, six.text_type('')) for lbl in labels)\n\n if isinstance(mem_limit, six.string_types):\n mem_limit = parse_bytes(mem_limit)\n if isinstance(memswap_limit, six.string_types):\n memswap_limit = parse_bytes(memswap_limit)\n\n if isinstance(ports, list):\n exposed_ports = {}\n for port_definition in ports:\n port = port_definition\n proto = 'tcp'\n if isinstance(port_definition, tuple):\n if len(port_definition) == 2:\n proto 
= port_definition[1]\n port = port_definition[0]\n exposed_ports['{0}/{1}'.format(port, proto)] = {}\n ports = exposed_ports\n\n if isinstance(volumes, six.string_types):\n volumes = [volumes, ]\n\n if isinstance(volumes, list):\n volumes_dict = {}\n for vol in volumes:\n volumes_dict[vol] = {}\n volumes = volumes_dict\n\n if volumes_from:\n if not isinstance(volumes_from, six.string_types):\n volumes_from = ','.join(volumes_from)\n else:\n # Force None, an empty list or dict causes client.start to fail\n volumes_from = None\n\n attach_stdin = False\n attach_stdout = False\n attach_stderr = False\n stdin_once = False\n\n if not detach:\n attach_stdout = True\n attach_stderr = True\n\n if stdin_open:\n attach_stdin = True\n stdin_once = True\n\n if compare_version('1.10', version) >= 0:\n message = ('{0!r} parameter has no effect on create_container().'\n ' It has been moved to start()')\n if dns is not None:\n raise errors.InvalidVersion(message.format('dns'))\n if volumes_from is not None:\n raise errors.InvalidVersion(message.format('volumes_from'))\n\n return {\n 'Hostname': hostname,\n 'Domainname': domainname,\n 'ExposedPorts': ports,\n 'User': six.text_type(user) if user else None,\n 'Tty': tty,\n 'OpenStdin': stdin_open,\n 'StdinOnce': stdin_once,\n 'Memory': mem_limit,\n 'AttachStdin': attach_stdin,\n 'AttachStdout': attach_stdout,\n 'AttachStderr': attach_stderr,\n 'Env': environment,\n 'Cmd': command,\n 'Dns': dns,\n 'Image': image,\n 'Volumes': volumes,\n 'VolumesFrom': volumes_from,\n 'NetworkDisabled': network_disabled,\n 'Entrypoint': entrypoint,\n 'CpuShares': cpu_shares,\n 'Cpuset': cpuset,\n 'CpusetCpus': cpuset,\n 'WorkingDir': working_dir,\n 'MemorySwap': memswap_limit,\n 'HostConfig': host_config,\n 'MacAddress': mac_address,\n 'Labels': labels,\n 'VolumeDriver': volume_driver,\n }\n",
"path": "docker/utils/utils.py"
}
] | diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 8dc726b34..c49b3e585 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -715,7 +715,7 @@ def create_container_config(
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
- 'User': user,
+ 'User': six.text_type(user) if user else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
diff --git a/tests/integration_test.py b/tests/integration_test.py
index fd4ff2d08..4fb2b8ff9 100644
--- a/tests/integration_test.py
+++ b/tests/integration_test.py
@@ -1624,3 +1624,9 @@ def test_649(self):
ctnr = self.client.create_container('busybox', ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
+
+ def test_715(self):
+ ctnr = self.client.create_container('busybox', ['id', '-u'], user=1000)
+ self.client.start(ctnr)
+ self.client.wait(ctnr)
+ assert self.client.logs(ctnr) == '1000\n'
|
mkdocs__mkdocs-2071 | How to add a watched directory
The docs say:
> The serve event is only called when the serve command is used during development. It is passed the Server instance which can be modified before it is activated. For example, additional files or directories could be added to the list of "watched" files for auto-reloading.
How can I add files or directories?
I tried this:
```python
def on_serve(self, server, config, **kwargs):
for element in self.config["watch"]:
server.watch(element)
return server
```
With this in my `mkdocs.yml`:
```yaml
plugins:
- search
- mkdocstrings:
watch:
- src/mkdocstrings
```
It detects the changes, but since I gave no function to `server.watch(dir, func)`, the site is not rebuilt.
I checked the source code of `mkdocs`, and I see that you are using a local function that we cannot reuse ourselves without rewriting it:
https://github.com/mkdocs/mkdocs/blob/262c2b70f3b1a450d685530610af3f28e12f9c9f/mkdocs/commands/serve.py#L120-L137
and
https://github.com/mkdocs/mkdocs/blob/262c2b70f3b1a450d685530610af3f28e12f9c9f/mkdocs/commands/serve.py#L69
What would be the best way to add a directory with this same `builder` functionality? Should I simply copy-paste it?
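As a hedged sketch of what becomes possible once the `serve` event also receives the `builder` callable (which is what the pr_diff further down this record adds), a plugin can hand MkDocs' own builder straight back to `server.watch`. The plugin class name and the `watch` option below are illustrative, not part of MkDocs:
```python
# Illustrative plugin, assuming `on_serve` receives the `builder` callable
# (as added by the change recorded in the pr_diff below); names are made up.
from mkdocs.plugins import BasePlugin
from mkdocs.config import config_options


class WatchExtraPaths(BasePlugin):
    config_scheme = (
        ('watch', config_options.Type(list, default=[])),
    )

    def on_serve(self, server, config, builder, **kwargs):
        # Reuse MkDocs' own builder so edits in the extra paths trigger
        # a full rebuild followed by a livereload refresh.
        for path in self.config['watch']:
            server.watch(path, builder)
        return server
```
With such a plugin, the `mkdocs.yml` snippet above would rebuild the site whenever `src/mkdocstrings` changes, without copying the private `builder` from `serve.py`.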
| [
{
"content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Tornado 5+.\n\n As of Python 3.8, the default event loop on Windows is `proactor`,\n however Tornado requires the old default \"selector\" event loop.\n As Tornado has decided to leave this to users to set, MkDocs needs\n to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n \"\"\"\n if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import WindowsSelectorEventLoopPolicy\n except ImportError:\n pass # Can't assign a policy which doesn't exist.\n else:\n if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super().write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super().get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to separate the code paths from the --livereload\n # alternative.\n _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload', **kwargs):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. 
The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir,\n **kwargs\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n",
"path": "mkdocs/commands/serve.py"
}
] | [
{
"content": "import logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Tornado 5+.\n\n As of Python 3.8, the default event loop on Windows is `proactor`,\n however Tornado requires the old default \"selector\" event loop.\n As Tornado has decided to leave this to users to set, MkDocs needs\n to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n \"\"\"\n if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import WindowsSelectorEventLoopPolicy\n except ImportError:\n pass # Can't assign a policy which doesn't exist.\n else:\n if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super().write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super().get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config, builder=builder)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to separate the code paths from the --livereload\n # alternative.\n _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload', **kwargs):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. 
The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir,\n **kwargs\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n",
"path": "mkdocs/commands/serve.py"
}
] | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index 2b81e42552..392f533942 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -23,6 +23,8 @@ The current and past members of the MkDocs team.
## Version 1.1.1 (in development)
+* Bugfix: Pass `builder` to the `on_serve` event so that it can be passed to
+ `server.watch` by plugins (#1952).
* Bugfix: Use `lunr[languages]==0.5.8` to avoid `nltk` incompatibilities (#2062).
* Bugfix: Ensure wheel is Python 3 only (#2021).
* Bugfix: Clean up `dev_addr` validation and disallow `0.0.0.0` (#2022).
diff --git a/docs/user-guide/plugins.md b/docs/user-guide/plugins.md
index 59ed8647b0..fb7bc8b3aa 100644
--- a/docs/user-guide/plugins.md
+++ b/docs/user-guide/plugins.md
@@ -155,6 +155,7 @@ entire site.
Parameters:
: __server:__ `livereload.Server` instance
: __config:__ global configuration object
+ : __builder:__ a callable which gets passed to each call to `server.watch`
Returns:
: `livereload.Server` instance
diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py
index 21b7ca6c1e..390f134596 100644
--- a/mkdocs/commands/serve.py
+++ b/mkdocs/commands/serve.py
@@ -73,7 +73,7 @@ def get_web_handlers(self, script):
server.watch(d, builder)
# Run `serve` plugin events.
- server = config['plugins'].run_event('serve', server, config=config)
+ server = config['plugins'].run_event('serve', server, config=config, builder=builder)
server.serve(root=site_dir, host=host, port=port, restart_delay=0)
|
TheAlgorithms__Python-7556 | [PYTEST WARNING] QasmSimulator will be deprecated
### Feature description
The use of `q.Aer.get_backend("qasm_simulator")` raises the warning
```
/opt/hostedtoolcache/Python/3.10.7/x64/lib/python3.10/site-packages/qiskit_aer/backends/qasm_simulator.py:360: PendingDeprecationWarning: The `QasmSimulator` backend will be deprecated in the future. It has been superseded by the `AerSimulator` backend.
warn('The `QasmSimulator` backend will be deprecated in the'
```
This code is found in the following files:
- deutsch_jozsa @abhishekjiitr
- half_adder @abhishekjiitr
- not_gate @abhishekjiitr
- single_quibit_measure @abhishekjiitr
origin: #7211
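The fix recorded in this entry's pr_diff is simply to request the `aer_simulator` backend instead. A minimal, self-contained sketch of that substitution (the toy one-qubit circuit is illustrative and not one of the affected files):
```python
# Swap the pending-deprecated "qasm_simulator" for "aer_simulator"; the
# circuit here is only a placeholder to make the snippet runnable.
from qiskit import Aer, QuantumCircuit, execute

circuit = QuantumCircuit(1, 1)
circuit.h(0)
circuit.measure(0, 0)

backend = Aer.get_backend("aer_simulator")  # instead of "qasm_simulator"
print(execute(circuit, backend, shots=1000).result().get_counts(circuit))
```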
| [
{
"content": "\"\"\"\nBuild the superdense coding protocol. This quantum\ncircuit can send two classical bits using one quantum\nbit. This circuit is designed using the Qiskit\nframework. This experiment run in IBM Q simulator\nwith 1000 shots.\n.\nReferences:\nhttps://qiskit.org/textbook/ch-algorithms/superdense-coding.html\nhttps://en.wikipedia.org/wiki/Superdense_coding\n\"\"\"\n\nimport math\n\nimport qiskit\nfrom qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute\n\n\ndef superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts:\n \"\"\"\n The input refer to the classical message\n that you wants to send. {'00','01','10','11'}\n result for default values: {11: 1000}\n ┌───┐ ┌───┐\n qr_0: ─────┤ X ├──────────┤ X ├─────\n ┌───┐└─┬─┘┌───┐┌───┐└─┬─┘┌───┐\n qr_1: ┤ H ├──■──┤ X ├┤ Z ├──■──┤ H ├\n └───┘ └───┘└───┘ └───┘\n cr: 2/══════════════════════════════\n Args:\n bit_1: bit 1 of classical information to send.\n bit_2: bit 2 of classical information to send.\n Returns:\n qiskit.result.counts.Counts: counts of send state.\n >>> superdense_coding(0,0)\n {'00': 1000}\n >>> superdense_coding(0,1)\n {'01': 1000}\n >>> superdense_coding(-1,0)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be positive.\n >>> superdense_coding(1,'j')\n Traceback (most recent call last):\n ...\n TypeError: inputs must be integers.\n >>> superdense_coding(1,0.5)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be exact integers.\n >>> superdense_coding(2,1)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be less or equal to 1.\n \"\"\"\n if (type(bit_1) == str) or (type(bit_2) == str):\n raise TypeError(\"inputs must be integers.\")\n if (bit_1 < 0) or (bit_2 < 0):\n raise ValueError(\"inputs must be positive.\")\n if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2):\n raise ValueError(\"inputs must be exact integers.\")\n if (bit_1 > 1) or (bit_2 > 1):\n raise ValueError(\"inputs must be less or equal to 1.\")\n\n # build registers\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n\n quantum_circuit = QuantumCircuit(qr, cr)\n\n # entanglement the qubits\n quantum_circuit.h(1)\n quantum_circuit.cx(1, 0)\n\n # send the information\n c_information = str(bit_1) + str(bit_2)\n\n if c_information == \"11\":\n quantum_circuit.x(1)\n quantum_circuit.z(1)\n elif c_information == \"10\":\n quantum_circuit.z(1)\n elif c_information == \"01\":\n quantum_circuit.x(1)\n else:\n quantum_circuit.i(1)\n\n # unentangled the circuit\n quantum_circuit.cx(1, 0)\n quantum_circuit.h(1)\n\n # measure the circuit\n quantum_circuit.measure(qr, cr)\n\n backend = Aer.get_backend(\"qasm_simulator\")\n job = execute(quantum_circuit, backend, shots=1000)\n\n return job.result().get_counts(quantum_circuit)\n\n\nif __name__ == \"__main__\":\n print(f\"Counts for classical state send: {superdense_coding(1,1)}\")\n",
"path": "quantum/superdense_coding.py"
}
] | [
{
"content": "\"\"\"\nBuild the superdense coding protocol. This quantum\ncircuit can send two classical bits using one quantum\nbit. This circuit is designed using the Qiskit\nframework. This experiment run in IBM Q simulator\nwith 1000 shots.\n.\nReferences:\nhttps://qiskit.org/textbook/ch-algorithms/superdense-coding.html\nhttps://en.wikipedia.org/wiki/Superdense_coding\n\"\"\"\n\nimport math\n\nimport qiskit\nfrom qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute\n\n\ndef superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts:\n \"\"\"\n The input refer to the classical message\n that you wants to send. {'00','01','10','11'}\n result for default values: {11: 1000}\n ┌───┐ ┌───┐\n qr_0: ─────┤ X ├──────────┤ X ├─────\n ┌───┐└─┬─┘┌───┐┌───┐└─┬─┘┌───┐\n qr_1: ┤ H ├──■──┤ X ├┤ Z ├──■──┤ H ├\n └───┘ └───┘└───┘ └───┘\n cr: 2/══════════════════════════════\n Args:\n bit_1: bit 1 of classical information to send.\n bit_2: bit 2 of classical information to send.\n Returns:\n qiskit.result.counts.Counts: counts of send state.\n >>> superdense_coding(0,0)\n {'00': 1000}\n >>> superdense_coding(0,1)\n {'01': 1000}\n >>> superdense_coding(-1,0)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be positive.\n >>> superdense_coding(1,'j')\n Traceback (most recent call last):\n ...\n TypeError: inputs must be integers.\n >>> superdense_coding(1,0.5)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be exact integers.\n >>> superdense_coding(2,1)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be less or equal to 1.\n \"\"\"\n if (type(bit_1) == str) or (type(bit_2) == str):\n raise TypeError(\"inputs must be integers.\")\n if (bit_1 < 0) or (bit_2 < 0):\n raise ValueError(\"inputs must be positive.\")\n if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2):\n raise ValueError(\"inputs must be exact integers.\")\n if (bit_1 > 1) or (bit_2 > 1):\n raise ValueError(\"inputs must be less or equal to 1.\")\n\n # build registers\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n\n quantum_circuit = QuantumCircuit(qr, cr)\n\n # entanglement the qubits\n quantum_circuit.h(1)\n quantum_circuit.cx(1, 0)\n\n # send the information\n c_information = str(bit_1) + str(bit_2)\n\n if c_information == \"11\":\n quantum_circuit.x(1)\n quantum_circuit.z(1)\n elif c_information == \"10\":\n quantum_circuit.z(1)\n elif c_information == \"01\":\n quantum_circuit.x(1)\n else:\n quantum_circuit.i(1)\n\n # unentangled the circuit\n quantum_circuit.cx(1, 0)\n quantum_circuit.h(1)\n\n # measure the circuit\n quantum_circuit.measure(qr, cr)\n\n backend = Aer.get_backend(\"aer_simulator\")\n job = execute(quantum_circuit, backend, shots=1000)\n\n return job.result().get_counts(quantum_circuit)\n\n\nif __name__ == \"__main__\":\n print(f\"Counts for classical state send: {superdense_coding(1,1)}\")\n",
"path": "quantum/superdense_coding.py"
}
] | diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py
index c8eda381158b..10ebc2d3593c 100644
--- a/quantum/superdense_coding.py
+++ b/quantum/superdense_coding.py
@@ -92,7 +92,7 @@ def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Co
# measure the circuit
quantum_circuit.measure(qr, cr)
- backend = Aer.get_backend("qasm_simulator")
+ backend = Aer.get_backend("aer_simulator")
job = execute(quantum_circuit, backend, shots=1000)
return job.result().get_counts(quantum_circuit)
|
pwr-Solaar__Solaar-1810 | eliminate visual glitching when updating a setting
**Information**
- Solaar version (`solaar --version` and `git describe --tags`): 1.1.6
**Is your feature request related to a problem? Please describe.**
Some settings' displays glitch when they are updated, possibly only range settings. For a very short time, a spinner is displayed and the setting control shrinks.
**Describe the solution you'd like**
The size of the setting control should not change.
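One generic GTK3 idea that would satisfy this, offered only as a hedged sketch (the helper below is hypothetical and not necessarily the approach Solaar adopted): toggle the spinner's opacity instead of its visibility, so the widget keeps its allocation and its siblings are never resized when it reappears.
```python
# Hypothetical helper, not Solaar code: an invisible-but-still-allocated
# spinner cannot change the size of the surrounding setting control.
def set_spinner_busy(spinner, busy):
    if busy:
        spinner.set_opacity(1.0)
        spinner.start()
    else:
        spinner.stop()
        spinner.set_opacity(0.0)  # still occupies its space, draws nothing
```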
| [
{
"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport traceback\n\nfrom logging import WARNING as _WARNING\nfrom logging import getLogger\nfrom threading import Timer as _Timer\n\nfrom gi.repository import Gdk, GLib, Gtk\nfrom logitech_receiver.settings import KIND as _SETTING_KIND\nfrom logitech_receiver.settings import SENSITIVITY_IGNORE as _SENSITIVITY_IGNORE\nfrom solaar.i18n import _, ngettext\nfrom solaar.ui import ui_async as _ui_async\n\n_log = getLogger(__name__)\ndel getLogger\n\n#\n#\n#\n\n\ndef _read_async(setting, force_read, sbox, device_is_online, sensitive):\n def _do_read(s, force, sb, online, sensitive):\n v = s.read(not force)\n GLib.idle_add(_update_setting_item, sb, v, online, sensitive, priority=99)\n\n _ui_async(_do_read, setting, force_read, sbox, device_is_online, sensitive)\n\n\ndef _write_async(setting, value, sbox, sensitive=True, key=None):\n def _do_write(s, v, sb, key):\n try:\n if key is None:\n v = setting.write(v)\n else:\n v = setting.write_key_value(key, v)\n v = {key: v}\n except Exception:\n traceback.print_exc()\n v = None\n if sb:\n GLib.idle_add(_update_setting_item, sb, v, True, sensitive, priority=99)\n\n if sbox:\n sbox._control.set_sensitive(False)\n sbox._failed.set_visible(False)\n sbox._spinner.set_visible(True)\n sbox._spinner.start()\n _ui_async(_do_write, setting, value, sbox, key)\n\n\n#\n#\n#\n\n\nclass Control():\n def __init__(**kwargs):\n pass\n\n def init(self, sbox, delegate):\n self.sbox = sbox\n self.delegate = delegate if delegate else self\n\n def changed(self, *args):\n if self.get_sensitive():\n self.delegate.update()\n\n def update(self):\n _write_async(self.sbox.setting, self.get_value(), self.sbox)\n\n def layout(self, sbox, label, change, spinner, failed):\n sbox.pack_start(label, False, False, 0)\n sbox.pack_end(change, False, False, 0)\n sbox.pack_end(self, sbox.setting.kind == _SETTING_KIND.range, sbox.setting.kind == _SETTING_KIND.range, 0)\n sbox.pack_end(spinner, False, False, 0)\n sbox.pack_end(failed, False, False, 0)\n return self\n\n\nclass ToggleControl(Gtk.Switch, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(halign=Gtk.Align.CENTER, valign=Gtk.Align.CENTER)\n self.init(sbox, delegate)\n self.connect('notify::active', self.changed)\n\n def set_value(self, value):\n self.set_state(value)\n\n def get_value(self):\n return self.get_state()\n\n\nclass SliderControl(Gtk.Scale, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.timer = None\n self.set_range(*self.sbox.setting.range)\n self.set_round_digits(0)\n self.set_digits(0)\n self.set_increments(1, 5)\n self.connect('value-changed', self.changed)\n\n def get_value(self):\n return 
int(super().get_value())\n\n def changed(self, *args):\n if self.get_sensitive():\n if self.timer:\n self.timer.cancel()\n self.timer = _Timer(0.5, lambda: GLib.idle_add(self.do_change))\n self.timer.start()\n\n def do_change(self):\n self.timer.cancel()\n self.update()\n\n\ndef _create_choice_control(sbox, delegate=None, choices=None):\n if 50 > len(choices if choices else sbox.setting.choices):\n return ChoiceControlLittle(sbox, choices=choices, delegate=delegate)\n else:\n return ChoiceControlBig(sbox, choices=choices, delegate=delegate)\n\n\n# GTK boxes have property lists, but the keys must be strings\nclass ChoiceControlLittle(Gtk.ComboBoxText, Control):\n def __init__(self, sbox, delegate=None, choices=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.choices = choices if choices is not None else sbox.setting.choices\n for entry in self.choices:\n self.append(str(int(entry)), str(entry))\n self.connect('changed', self.changed)\n\n def get_value(self):\n return int(self.get_active_id()) if self.get_active_id() is not None else None\n\n def set_value(self, value):\n self.set_active_id(str(int(value)))\n\n def get_choice(self):\n id = self.get_value()\n return next((x for x in self.choices if x == id), None)\n\n def set_choices(self, choices):\n self.remove_all()\n for choice in choices:\n self.append(str(int(choice)), _(str(choice)))\n\n\nclass ChoiceControlBig(Gtk.Entry, Control):\n def __init__(self, sbox, delegate=None, choices=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.choices = choices if choices is not None else sbox.setting.choices\n self.value = None\n self.set_width_chars(max([len(str(x)) for x in self.choices]) + 5)\n liststore = Gtk.ListStore(int, str)\n for v in self.choices:\n liststore.append((int(v), str(v)))\n completion = Gtk.EntryCompletion()\n completion.set_model(liststore)\n norm = lambda s: s.replace('_', '').replace(' ', '').lower()\n completion.set_match_func(lambda completion, key, it: norm(key) in norm(completion.get_model()[it][1]))\n completion.set_text_column(1)\n self.set_completion(completion)\n self.connect('changed', self.changed)\n self.connect('activate', self.activate)\n completion.connect('match_selected', self.select)\n\n def get_value(self):\n choice = self.get_choice()\n return int(choice) if choice is not None else None\n\n def set_value(self, value):\n self.set_text(str(next((x for x in self.choices if x == value), None)))\n\n def get_choice(self):\n key = self.get_text()\n return next((x for x in self.choices if x == key), None)\n\n def changed(self, *args):\n self.value = self.get_choice()\n icon = 'dialog-warning' if self.value is None else 'dialog-question' if self.get_sensitive() else ''\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, icon)\n tooltip = _('Incomplete') if self.value is None else _('Complete - ENTER to change')\n self.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, tooltip)\n\n def activate(self, *args):\n if self.value is not None and self.get_sensitive():\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, '')\n self.delegate.update()\n\n def select(self, completion, model, iter):\n self.set_value(model.get(iter, 0)[0])\n if self.value and self.get_sensitive():\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, '')\n self.delegate.update()\n\n\nclass MapChoiceControl(Gtk.HBox, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(homogeneous=False, spacing=6)\n self.init(sbox, 
delegate)\n self.keyBox = Gtk.ComboBoxText()\n for entry in sbox.setting.choices:\n self.keyBox.append(str(int(entry)), _(str(entry)))\n self.keyBox.set_active(0)\n key_choice = int(self.keyBox.get_active_id())\n self.value_choices = self.sbox.setting.choices[key_choice]\n self.valueBox = _create_choice_control(sbox.setting, choices=self.value_choices, delegate=self)\n self.pack_start(self.keyBox, False, False, 0)\n self.pack_end(self.valueBox, False, False, 0)\n self.keyBox.connect('changed', self.map_value_notify_key)\n\n def get_value(self):\n key_choice = int(self.keyBox.get_active_id())\n if key_choice is not None and self.valueBox.get_value() is not None:\n return self.valueBox.get_value()\n\n def set_value(self, value):\n self.valueBox.set_sensitive(self.get_sensitive())\n key = int(self.keyBox.get_active_id())\n if value.get(key) is not None:\n self.valueBox.set_value(value.get(key))\n self.valueBox.set_sensitive(True)\n\n def map_populate_value_box(self, key_choice):\n choices = self.sbox.setting.choices[key_choice]\n if choices != self.value_choices:\n self.value_choices = choices\n self.valueBox.remove_all()\n self.valueBox.set_choices(choices)\n current = self.sbox.setting._value.get(key_choice) if self.sbox.setting._value else None\n if current is not None:\n self.valueBox.set_value(current)\n\n def map_value_notify_key(self, *args):\n key_choice = int(self.keyBox.get_active_id())\n if self.keyBox.get_sensitive():\n self.map_populate_value_box(key_choice)\n\n def update(self):\n key_choice = int(self.keyBox.get_active_id())\n value = self.get_value()\n if value is not None and self.valueBox.get_sensitive() and self.sbox.setting._value.get(key_choice) != value:\n self.sbox.setting._value[int(key_choice)] = value\n _write_async(self.sbox.setting, value, self.sbox, key=int(key_choice))\n\n\nclass MultipleControl(Gtk.ListBox, Control):\n def __init__(self, sbox, change, button_label='...', delegate=None):\n super().__init__()\n self.init(sbox, delegate)\n self.set_selection_mode(Gtk.SelectionMode.NONE)\n self.set_no_show_all(True)\n self._showing = True\n self.setup(sbox.setting) # set up the data and boxes for the sub-controls\n btn = Gtk.Button(button_label)\n btn.set_alignment(1.0, 0.5)\n btn.connect('clicked', self.toggle_display)\n self._button = btn\n hbox = Gtk.HBox(homogeneous=False, spacing=6)\n hbox.pack_end(change, False, False, 0)\n hbox.pack_end(btn, False, False, 0)\n self._header = hbox\n vbox = Gtk.VBox(homogeneous=False, spacing=6)\n vbox.pack_start(hbox, True, True, 0)\n vbox.pack_end(self, True, True, 0)\n self.vbox = vbox\n self.toggle_display()\n _disable_listbox_highlight_bg(self)\n\n def layout(self, sbox, label, change, spinner, failed):\n self._header.pack_start(label, False, False, 0)\n self._header.pack_end(spinner, False, False, 0)\n self._header.pack_end(failed, False, False, 0)\n sbox.pack_start(self.vbox, True, True, 0)\n sbox._button = self._button\n return True\n\n def toggle_display(self, *args):\n self._showing = not self._showing\n if not self._showing:\n for c in self.get_children():\n c.hide()\n self.hide()\n else:\n self.show()\n for c in self.get_children():\n c.show_all()\n\n\nclass MultipleToggleControl(MultipleControl):\n def setup(self, setting):\n self._label_control_pairs = []\n for k in setting._validator.get_options():\n h = Gtk.HBox(homogeneous=False, spacing=0)\n lbl_text = str(k)\n lbl_tooltip = None\n if hasattr(setting, '_labels'):\n l1, l2 = setting._labels.get(k, (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip 
= l2 if l2 else lbl_tooltip\n lbl = Gtk.Label(lbl_text)\n h.set_tooltip_text(lbl_tooltip or ' ')\n control = Gtk.Switch()\n control._setting_key = int(k)\n control.connect('notify::active', self.toggle_notify)\n h.pack_start(lbl, False, False, 0)\n h.pack_end(control, False, False, 0)\n lbl.set_alignment(0.0, 0.5)\n lbl.set_margin_left(30)\n self.add(h)\n self._label_control_pairs.append((lbl, control))\n\n def toggle_notify(self, switch, active):\n if switch.get_sensitive():\n key = switch._setting_key\n new_state = switch.get_state()\n if self.sbox.setting._value[key] != new_state:\n self.sbox.setting._value[key] = new_state\n _write_async(self.sbox.setting, new_state, self.sbox, key=int(key))\n\n def set_value(self, value):\n active = 0\n total = len(self._label_control_pairs)\n to_join = []\n for lbl, elem in self._label_control_pairs:\n v = value.get(elem._setting_key, None)\n if v is not None:\n elem.set_state(v)\n if elem.get_state():\n active += 1\n to_join.append(lbl.get_text() + ': ' + str(elem.get_state()))\n b = ', '.join(to_join)\n self._button.set_label(f'{active} / {total}')\n self._button.set_tooltip_text(b)\n\n\nclass MultipleRangeControl(MultipleControl):\n def setup(self, setting):\n self._items = []\n for item in setting._validator.items:\n lbl_text = str(item)\n lbl_tooltip = None\n if hasattr(setting, '_labels'):\n l1, l2 = setting._labels.get(int(item), (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip = l2 if l2 else lbl_tooltip\n item_lbl = Gtk.Label(lbl_text)\n self.add(item_lbl)\n self.set_tooltip_text(lbl_tooltip or ' ')\n item_lb = Gtk.ListBox()\n item_lb.set_selection_mode(Gtk.SelectionMode.NONE)\n item_lb._sub_items = []\n for sub_item in setting._validator.sub_items[item]:\n h = Gtk.HBox(homogeneous=False, spacing=20)\n lbl_text = str(sub_item)\n lbl_tooltip = None\n if hasattr(setting, '_labels_sub'):\n l1, l2 = setting._labels_sub.get(str(sub_item), (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip = l2 if l2 else lbl_tooltip\n sub_item_lbl = Gtk.Label(lbl_text)\n h.set_tooltip_text(lbl_tooltip or ' ')\n h.pack_start(sub_item_lbl, False, False, 0)\n sub_item_lbl.set_margin_left(30)\n sub_item_lbl.set_alignment(0.0, 0.5)\n if sub_item.widget == 'Scale':\n control = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, sub_item.minimum, sub_item.maximum, 1)\n control.set_round_digits(0)\n control.set_digits(0)\n h.pack_end(control, True, True, 0)\n elif sub_item.widget == 'SpinButton':\n control = Gtk.SpinButton.new_with_range(sub_item.minimum, sub_item.maximum, 1)\n control.set_digits(0)\n h.pack_end(control, False, False, 0)\n else:\n raise NotImplementedError\n control.connect('value-changed', self.changed, item, sub_item)\n item_lb.add(h)\n h._setting_sub_item = sub_item\n h._label, h._control = sub_item_lbl, control\n item_lb._sub_items.append(h)\n item_lb._setting_item = item\n _disable_listbox_highlight_bg(item_lb)\n self.add(item_lb)\n self._items.append(item_lb)\n\n def changed(self, control, item, sub_item):\n if control.get_sensitive():\n if hasattr(control, '_timer'):\n control._timer.cancel()\n control._timer = _Timer(0.5, lambda: GLib.idle_add(self._write, control, item, sub_item))\n control._timer.start()\n\n def _write(self, control, item, sub_item):\n control._timer.cancel()\n delattr(control, '_timer')\n new_state = int(control.get_value())\n if self.sbox.setting._value[int(item)][str(sub_item)] != new_state:\n self.sbox.setting._value[int(item)][str(sub_item)] = new_state\n _write_async(self.sbox.setting, 
self.sbox.setting._value[int(item)], self.sbox, key=int(item))\n\n def set_value(self, value):\n b = ''\n n = 0\n for ch in self._items:\n item = ch._setting_item\n v = value.get(int(item), None)\n if v is not None:\n b += str(item) + ': ('\n to_join = []\n for c in ch._sub_items:\n sub_item = c._setting_sub_item\n try:\n sub_item_value = v[str(sub_item)]\n except KeyError:\n sub_item_value = c._control.get_value()\n c._control.set_value(sub_item_value)\n n += 1\n to_join.append(str(sub_item) + f'={sub_item_value}')\n b += ', '.join(to_join) + ') '\n lbl_text = ngettext('%d value', '%d values', n) % n\n self._button.set_label(lbl_text)\n self._button.set_tooltip_text(b)\n\n\nclass PackedRangeControl(MultipleRangeControl):\n def setup(self, setting):\n validator = setting._validator\n self._items = []\n for item in range(validator.count):\n h = Gtk.HBox(homogeneous=False, spacing=0)\n lbl = Gtk.Label(str(validator.keys[item]))\n control = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, validator.min_value, validator.max_value, 1)\n control.set_round_digits(0)\n control.set_digits(0)\n control.connect('value-changed', self.changed, validator.keys[item])\n h.pack_start(lbl, False, False, 0)\n h.pack_end(control, True, True, 0)\n h._setting_item = validator.keys[item]\n h.control = control\n lbl.set_alignment(0.0, 0.5)\n lbl.set_margin_left(30)\n self.add(h)\n self._items.append(h)\n\n def changed(self, control, item):\n if control.get_sensitive():\n if hasattr(control, '_timer'):\n control._timer.cancel()\n control._timer = _Timer(0.5, lambda: GLib.idle_add(self._write, control, item))\n control._timer.start()\n\n def _write(self, control, item):\n control._timer.cancel()\n delattr(control, '_timer')\n new_state = int(control.get_value())\n if self.sbox.setting._value[int(item)] != new_state:\n self.sbox.setting._value[int(item)] = new_state\n _write_async(self.sbox.setting, self.sbox.setting._value[int(item)], self.sbox, key=int(item))\n\n def set_value(self, value):\n b = ''\n n = len(self._items)\n for h in self._items:\n item = h._setting_item\n v = value.get(int(item), None)\n if v is not None:\n h.control.set_value(v)\n else:\n v = self.sbox.setting._value[int(item)]\n b += str(item) + ': (' + str(v) + ') '\n lbl_text = ngettext('%d value', '%d values', n) % n\n self._button.set_label(lbl_text)\n self._button.set_tooltip_text(b)\n\n\n#\n#\n#\n\n_allowables_icons = {True: 'changes-allow', False: 'changes-prevent', _SENSITIVITY_IGNORE: 'dialog-error'}\n_allowables_tooltips = {\n True: _('Changes allowed'),\n False: _('No changes allowed'),\n _SENSITIVITY_IGNORE: _('Ignore this setting')\n}\n_next_allowable = {True: False, False: _SENSITIVITY_IGNORE, _SENSITIVITY_IGNORE: True}\n_icons_allowables = {v: k for k, v in _allowables_icons.items()}\n\n\n# clicking on the lock icon changes from changeable to unchangeable to ignore\ndef _change_click(button, sbox):\n icon = button.get_children()[0]\n icon_name, _ = icon.get_icon_name()\n allowed = _icons_allowables.get(icon_name, True)\n new_allowed = _next_allowable[allowed]\n sbox._control.set_sensitive(new_allowed is True)\n _change_icon(new_allowed, icon)\n if sbox.setting._device.persister: # remember the new setting sensitivity\n sbox.setting._device.persister.set_sensitivity(sbox.setting.name, new_allowed)\n if allowed == _SENSITIVITY_IGNORE: # update setting if it was being ignored\n setting = next((s for s in sbox.setting._device.settings if s.name == sbox.setting.name), None)\n if setting:\n persisted = 
sbox.setting._device.persister.get(setting.name) if sbox.setting._device.persister else None\n if setting.persist and persisted is not None:\n _write_async(setting, persisted, sbox)\n else:\n _read_async(setting, True, sbox, bool(sbox.setting._device.online), sbox._control.get_sensitive())\n return True\n\n\ndef _change_icon(allowed, icon):\n if allowed in _allowables_icons:\n icon._allowed = allowed\n icon.set_from_icon_name(_allowables_icons[allowed], Gtk.IconSize.LARGE_TOOLBAR)\n icon.set_tooltip_text(_allowables_tooltips[allowed])\n\n\ndef _create_sbox(s, device):\n sbox = Gtk.HBox(homogeneous=False, spacing=6)\n sbox.setting = s\n sbox.kind = s.kind\n if s.description:\n sbox.set_tooltip_text(s.description)\n lbl = Gtk.Label(s.label)\n lbl.set_alignment(0.0, 0.5)\n label = Gtk.EventBox()\n label.add(lbl)\n spinner = Gtk.Spinner()\n spinner.set_tooltip_text(_('Working') + '...')\n sbox._spinner = spinner\n failed = Gtk.Image.new_from_icon_name('dialog-warning', Gtk.IconSize.SMALL_TOOLBAR)\n failed.set_tooltip_text(_('Read/write operation failed.'))\n sbox._failed = failed\n change_icon = Gtk.Image.new_from_icon_name('changes-prevent', Gtk.IconSize.LARGE_TOOLBAR)\n sbox._change_icon = change_icon\n _change_icon(False, change_icon)\n change = Gtk.Button()\n change.set_relief(Gtk.ReliefStyle.NONE)\n change.add(change_icon)\n change.set_sensitive(True)\n change.connect('clicked', _change_click, sbox)\n\n if s.kind == _SETTING_KIND.toggle:\n control = ToggleControl(sbox)\n elif s.kind == _SETTING_KIND.range:\n control = SliderControl(sbox)\n elif s.kind == _SETTING_KIND.choice:\n control = _create_choice_control(sbox)\n elif s.kind == _SETTING_KIND.map_choice:\n control = MapChoiceControl(sbox)\n elif s.kind == _SETTING_KIND.multiple_toggle:\n control = MultipleToggleControl(sbox, change)\n elif s.kind == _SETTING_KIND.multiple_range:\n control = MultipleRangeControl(sbox, change)\n elif s.kind == _SETTING_KIND.packed_range:\n control = PackedRangeControl(sbox, change)\n else:\n if _log.isEnabledFor(_WARNING):\n _log.warn('setting %s display not implemented', s.label)\n return None\n\n control.set_sensitive(False) # the first read will enable it\n control.layout(sbox, label, change, spinner, failed)\n sbox._control = control\n sbox.show_all()\n spinner.start() # the first read will stop it\n failed.set_visible(False)\n return sbox\n\n\ndef _update_setting_item(sbox, value, is_online=True, sensitive=True):\n sbox._spinner.set_visible(False)\n sbox._spinner.stop()\n if value is None:\n sbox._control.set_sensitive(False)\n _change_icon(False, sbox._change_icon)\n sbox._failed.set_visible(is_online)\n return\n sbox._failed.set_visible(False)\n sbox._control.set_sensitive(False)\n sbox._control.set_value(value)\n sensitive = sbox._change_icon._allowed if sensitive is None else sensitive\n sbox._control.set_sensitive(sensitive is True)\n _change_icon(sensitive, sbox._change_icon)\n\n\ndef _disable_listbox_highlight_bg(lb):\n colour = Gdk.RGBA()\n colour.parse('rgba(0,0,0,0)')\n for child in lb.get_children():\n child.override_background_color(Gtk.StateFlags.PRELIGHT, colour)\n\n\n#\n#\n#\n\n# config panel\n_box = None\n_items = {}\n\n\ndef create():\n global _box\n assert _box is None\n _box = Gtk.VBox(homogeneous=False, spacing=8)\n _box._last_device = None\n\n config_scroll = Gtk.ScrolledWindow()\n config_scroll.add(_box)\n config_scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n config_scroll.set_shadow_type(Gtk.ShadowType.IN)\n config_scroll.set_size_request(0, 350) # ask 
for enough vertical space for about eight settings\n\n return config_scroll\n\n\ndef update(device, is_online=None):\n assert _box is not None\n assert device\n device_id = (device.receiver.path if device.receiver else device.path, device.number)\n if is_online is None:\n is_online = bool(device.online)\n\n # if the device changed since last update, clear the box first\n if device_id != _box._last_device:\n _box.set_visible(False)\n _box._last_device = device_id\n\n # hide controls belonging to other devices\n for k, sbox in _items.items():\n sbox = _items[k]\n sbox.set_visible(k[0:2] == device_id)\n\n for s in device.settings:\n k = (device_id[0], device_id[1], s.name)\n if k in _items:\n sbox = _items[k]\n else:\n sbox = _create_sbox(s, device)\n if sbox is None:\n continue\n _items[k] = sbox\n _box.pack_start(sbox, False, False, 0)\n sensitive = device.persister.get_sensitivity(s.name) if device.persister else True\n _read_async(s, False, sbox, is_online, sensitive)\n\n _box.set_visible(True)\n\n\ndef clean(device):\n \"\"\"Remove the controls for a given device serial.\n Needed after the device has been unpaired.\n \"\"\"\n assert _box is not None\n device_id = (device.receiver.path if device.receiver else device.path, device.number)\n for k in list(_items.keys()):\n if k[0:2] == device_id:\n _box.remove(_items[k])\n del _items[k]\n\n\ndef destroy():\n global _box\n _box = None\n _items.clear()\n\n\ndef change_setting(device, setting, values):\n \"\"\"External interface to change a setting and have the GUI show the change\"\"\"\n assert device == setting._device\n GLib.idle_add(_change_setting, device, setting, values, priority=99)\n\n\ndef _change_setting(device, setting, values):\n device_path = device.receiver.path if device.receiver else device.path\n if (device_path, device.number, setting.name) in _items:\n sbox = _items[(device_path, device.number, setting.name)]\n else:\n sbox = None\n _write_async(setting, values[-1], sbox, None, key=values[0] if len(values) > 1 else None)\n",
"path": "lib/solaar/ui/config_panel.py"
}
] | [
{
"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport traceback\n\nfrom logging import WARNING as _WARNING\nfrom logging import getLogger\nfrom threading import Timer as _Timer\n\nfrom gi.repository import Gdk, GLib, Gtk\nfrom logitech_receiver.settings import KIND as _SETTING_KIND\nfrom logitech_receiver.settings import SENSITIVITY_IGNORE as _SENSITIVITY_IGNORE\nfrom solaar.i18n import _, ngettext\nfrom solaar.ui import ui_async as _ui_async\n\n_log = getLogger(__name__)\ndel getLogger\n\n#\n#\n#\n\n\ndef _read_async(setting, force_read, sbox, device_is_online, sensitive):\n def _do_read(s, force, sb, online, sensitive):\n v = s.read(not force)\n GLib.idle_add(_update_setting_item, sb, v, online, sensitive, priority=99)\n\n _ui_async(_do_read, setting, force_read, sbox, device_is_online, sensitive)\n\n\ndef _write_async(setting, value, sbox, sensitive=True, key=None):\n def _do_write(s, v, sb, key):\n try:\n if key is None:\n v = setting.write(v)\n else:\n v = setting.write_key_value(key, v)\n v = {key: v}\n except Exception:\n traceback.print_exc()\n v = None\n if sb:\n GLib.idle_add(_update_setting_item, sb, v, True, sensitive, priority=99)\n\n if sbox:\n sbox._control.set_sensitive(False)\n sbox._failed.set_visible(False)\n sbox._spinner.set_visible(True)\n sbox._spinner.start()\n _ui_async(_do_write, setting, value, sbox, key)\n\n\n#\n#\n#\n\n\nclass Control():\n def __init__(**kwargs):\n pass\n\n def init(self, sbox, delegate):\n self.sbox = sbox\n self.delegate = delegate if delegate else self\n\n def changed(self, *args):\n if self.get_sensitive():\n self.delegate.update()\n\n def update(self):\n _write_async(self.sbox.setting, self.get_value(), self.sbox)\n\n def layout(self, sbox, label, change, spinner, failed):\n sbox.pack_start(label, False, False, 0)\n sbox.pack_end(change, False, False, 0)\n sbox.pack_end(self, sbox.setting.kind == _SETTING_KIND.range, sbox.setting.kind == _SETTING_KIND.range, 0)\n sbox.pack_end(spinner, False, False, 0)\n sbox.pack_end(failed, False, False, 0)\n return self\n\n\nclass ToggleControl(Gtk.Switch, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(halign=Gtk.Align.CENTER, valign=Gtk.Align.CENTER)\n self.init(sbox, delegate)\n self.connect('notify::active', self.changed)\n\n def set_value(self, value):\n self.set_state(value)\n\n def get_value(self):\n return self.get_state()\n\n\nclass SliderControl(Gtk.Scale, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.timer = None\n self.set_range(*self.sbox.setting.range)\n self.set_round_digits(0)\n self.set_digits(0)\n self.set_increments(1, 5)\n self.connect('value-changed', self.changed)\n\n def get_value(self):\n return 
int(super().get_value())\n\n def changed(self, *args):\n if self.get_sensitive():\n if self.timer:\n self.timer.cancel()\n self.timer = _Timer(0.5, lambda: GLib.idle_add(self.do_change))\n self.timer.start()\n\n def do_change(self):\n self.timer.cancel()\n self.update()\n\n\ndef _create_choice_control(sbox, delegate=None, choices=None):\n if 50 > len(choices if choices else sbox.setting.choices):\n return ChoiceControlLittle(sbox, choices=choices, delegate=delegate)\n else:\n return ChoiceControlBig(sbox, choices=choices, delegate=delegate)\n\n\n# GTK boxes have property lists, but the keys must be strings\nclass ChoiceControlLittle(Gtk.ComboBoxText, Control):\n def __init__(self, sbox, delegate=None, choices=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.choices = choices if choices is not None else sbox.setting.choices\n for entry in self.choices:\n self.append(str(int(entry)), str(entry))\n self.connect('changed', self.changed)\n\n def get_value(self):\n return int(self.get_active_id()) if self.get_active_id() is not None else None\n\n def set_value(self, value):\n self.set_active_id(str(int(value)))\n\n def get_choice(self):\n id = self.get_value()\n return next((x for x in self.choices if x == id), None)\n\n def set_choices(self, choices):\n self.remove_all()\n for choice in choices:\n self.append(str(int(choice)), _(str(choice)))\n\n\nclass ChoiceControlBig(Gtk.Entry, Control):\n def __init__(self, sbox, delegate=None, choices=None):\n super().__init__(halign=Gtk.Align.FILL)\n self.init(sbox, delegate)\n self.choices = choices if choices is not None else sbox.setting.choices\n self.value = None\n self.set_width_chars(max([len(str(x)) for x in self.choices]) + 5)\n liststore = Gtk.ListStore(int, str)\n for v in self.choices:\n liststore.append((int(v), str(v)))\n completion = Gtk.EntryCompletion()\n completion.set_model(liststore)\n norm = lambda s: s.replace('_', '').replace(' ', '').lower()\n completion.set_match_func(lambda completion, key, it: norm(key) in norm(completion.get_model()[it][1]))\n completion.set_text_column(1)\n self.set_completion(completion)\n self.connect('changed', self.changed)\n self.connect('activate', self.activate)\n completion.connect('match_selected', self.select)\n\n def get_value(self):\n choice = self.get_choice()\n return int(choice) if choice is not None else None\n\n def set_value(self, value):\n self.set_text(str(next((x for x in self.choices if x == value), None)))\n\n def get_choice(self):\n key = self.get_text()\n return next((x for x in self.choices if x == key), None)\n\n def changed(self, *args):\n self.value = self.get_choice()\n icon = 'dialog-warning' if self.value is None else 'dialog-question' if self.get_sensitive() else ''\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, icon)\n tooltip = _('Incomplete') if self.value is None else _('Complete - ENTER to change')\n self.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, tooltip)\n\n def activate(self, *args):\n if self.value is not None and self.get_sensitive():\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, '')\n self.delegate.update()\n\n def select(self, completion, model, iter):\n self.set_value(model.get(iter, 0)[0])\n if self.value and self.get_sensitive():\n self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, '')\n self.delegate.update()\n\n\nclass MapChoiceControl(Gtk.HBox, Control):\n def __init__(self, sbox, delegate=None):\n super().__init__(homogeneous=False, spacing=6)\n self.init(sbox, 
delegate)\n self.keyBox = Gtk.ComboBoxText()\n for entry in sbox.setting.choices:\n self.keyBox.append(str(int(entry)), _(str(entry)))\n self.keyBox.set_active(0)\n key_choice = int(self.keyBox.get_active_id())\n self.value_choices = self.sbox.setting.choices[key_choice]\n self.valueBox = _create_choice_control(sbox.setting, choices=self.value_choices, delegate=self)\n self.pack_start(self.keyBox, False, False, 0)\n self.pack_end(self.valueBox, False, False, 0)\n self.keyBox.connect('changed', self.map_value_notify_key)\n\n def get_value(self):\n key_choice = int(self.keyBox.get_active_id())\n if key_choice is not None and self.valueBox.get_value() is not None:\n return self.valueBox.get_value()\n\n def set_value(self, value):\n self.valueBox.set_sensitive(self.get_sensitive())\n key = int(self.keyBox.get_active_id())\n if value.get(key) is not None:\n self.valueBox.set_value(value.get(key))\n self.valueBox.set_sensitive(True)\n\n def map_populate_value_box(self, key_choice):\n choices = self.sbox.setting.choices[key_choice]\n if choices != self.value_choices:\n self.value_choices = choices\n self.valueBox.remove_all()\n self.valueBox.set_choices(choices)\n current = self.sbox.setting._value.get(key_choice) if self.sbox.setting._value else None\n if current is not None:\n self.valueBox.set_value(current)\n\n def map_value_notify_key(self, *args):\n key_choice = int(self.keyBox.get_active_id())\n if self.keyBox.get_sensitive():\n self.map_populate_value_box(key_choice)\n\n def update(self):\n key_choice = int(self.keyBox.get_active_id())\n value = self.get_value()\n if value is not None and self.valueBox.get_sensitive() and self.sbox.setting._value.get(key_choice) != value:\n self.sbox.setting._value[int(key_choice)] = value\n _write_async(self.sbox.setting, value, self.sbox, key=int(key_choice))\n\n\nclass MultipleControl(Gtk.ListBox, Control):\n def __init__(self, sbox, change, button_label='...', delegate=None):\n super().__init__()\n self.init(sbox, delegate)\n self.set_selection_mode(Gtk.SelectionMode.NONE)\n self.set_no_show_all(True)\n self._showing = True\n self.setup(sbox.setting) # set up the data and boxes for the sub-controls\n btn = Gtk.Button(button_label)\n btn.set_alignment(1.0, 0.5)\n btn.connect('clicked', self.toggle_display)\n self._button = btn\n hbox = Gtk.HBox(homogeneous=False, spacing=6)\n hbox.pack_end(change, False, False, 0)\n hbox.pack_end(btn, False, False, 0)\n self._header = hbox\n vbox = Gtk.VBox(homogeneous=False, spacing=6)\n vbox.pack_start(hbox, True, True, 0)\n vbox.pack_end(self, True, True, 0)\n self.vbox = vbox\n self.toggle_display()\n _disable_listbox_highlight_bg(self)\n\n def layout(self, sbox, label, change, spinner, failed):\n self._header.pack_start(label, False, False, 0)\n self._header.pack_end(spinner, False, False, 0)\n self._header.pack_end(failed, False, False, 0)\n sbox.pack_start(self.vbox, True, True, 0)\n sbox._button = self._button\n return True\n\n def toggle_display(self, *args):\n self._showing = not self._showing\n if not self._showing:\n for c in self.get_children():\n c.hide()\n self.hide()\n else:\n self.show()\n for c in self.get_children():\n c.show_all()\n\n\nclass MultipleToggleControl(MultipleControl):\n def setup(self, setting):\n self._label_control_pairs = []\n for k in setting._validator.get_options():\n h = Gtk.HBox(homogeneous=False, spacing=0)\n lbl_text = str(k)\n lbl_tooltip = None\n if hasattr(setting, '_labels'):\n l1, l2 = setting._labels.get(k, (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip 
= l2 if l2 else lbl_tooltip\n lbl = Gtk.Label(lbl_text)\n h.set_tooltip_text(lbl_tooltip or ' ')\n control = Gtk.Switch()\n control._setting_key = int(k)\n control.connect('notify::active', self.toggle_notify)\n h.pack_start(lbl, False, False, 0)\n h.pack_end(control, False, False, 0)\n lbl.set_alignment(0.0, 0.5)\n lbl.set_margin_left(30)\n self.add(h)\n self._label_control_pairs.append((lbl, control))\n\n def toggle_notify(self, switch, active):\n if switch.get_sensitive():\n key = switch._setting_key\n new_state = switch.get_state()\n if self.sbox.setting._value[key] != new_state:\n self.sbox.setting._value[key] = new_state\n _write_async(self.sbox.setting, new_state, self.sbox, key=int(key))\n\n def set_value(self, value):\n active = 0\n total = len(self._label_control_pairs)\n to_join = []\n for lbl, elem in self._label_control_pairs:\n v = value.get(elem._setting_key, None)\n if v is not None:\n elem.set_state(v)\n if elem.get_state():\n active += 1\n to_join.append(lbl.get_text() + ': ' + str(elem.get_state()))\n b = ', '.join(to_join)\n self._button.set_label(f'{active} / {total}')\n self._button.set_tooltip_text(b)\n\n\nclass MultipleRangeControl(MultipleControl):\n def setup(self, setting):\n self._items = []\n for item in setting._validator.items:\n lbl_text = str(item)\n lbl_tooltip = None\n if hasattr(setting, '_labels'):\n l1, l2 = setting._labels.get(int(item), (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip = l2 if l2 else lbl_tooltip\n item_lbl = Gtk.Label(lbl_text)\n self.add(item_lbl)\n self.set_tooltip_text(lbl_tooltip or ' ')\n item_lb = Gtk.ListBox()\n item_lb.set_selection_mode(Gtk.SelectionMode.NONE)\n item_lb._sub_items = []\n for sub_item in setting._validator.sub_items[item]:\n h = Gtk.HBox(homogeneous=False, spacing=20)\n lbl_text = str(sub_item)\n lbl_tooltip = None\n if hasattr(setting, '_labels_sub'):\n l1, l2 = setting._labels_sub.get(str(sub_item), (None, None))\n lbl_text = l1 if l1 else lbl_text\n lbl_tooltip = l2 if l2 else lbl_tooltip\n sub_item_lbl = Gtk.Label(lbl_text)\n h.set_tooltip_text(lbl_tooltip or ' ')\n h.pack_start(sub_item_lbl, False, False, 0)\n sub_item_lbl.set_margin_left(30)\n sub_item_lbl.set_alignment(0.0, 0.5)\n if sub_item.widget == 'Scale':\n control = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, sub_item.minimum, sub_item.maximum, 1)\n control.set_round_digits(0)\n control.set_digits(0)\n h.pack_end(control, True, True, 0)\n elif sub_item.widget == 'SpinButton':\n control = Gtk.SpinButton.new_with_range(sub_item.minimum, sub_item.maximum, 1)\n control.set_digits(0)\n h.pack_end(control, False, False, 0)\n else:\n raise NotImplementedError\n control.connect('value-changed', self.changed, item, sub_item)\n item_lb.add(h)\n h._setting_sub_item = sub_item\n h._label, h._control = sub_item_lbl, control\n item_lb._sub_items.append(h)\n item_lb._setting_item = item\n _disable_listbox_highlight_bg(item_lb)\n self.add(item_lb)\n self._items.append(item_lb)\n\n def changed(self, control, item, sub_item):\n if control.get_sensitive():\n if hasattr(control, '_timer'):\n control._timer.cancel()\n control._timer = _Timer(0.5, lambda: GLib.idle_add(self._write, control, item, sub_item))\n control._timer.start()\n\n def _write(self, control, item, sub_item):\n control._timer.cancel()\n delattr(control, '_timer')\n new_state = int(control.get_value())\n if self.sbox.setting._value[int(item)][str(sub_item)] != new_state:\n self.sbox.setting._value[int(item)][str(sub_item)] = new_state\n _write_async(self.sbox.setting, 
self.sbox.setting._value[int(item)], self.sbox, key=int(item))\n\n def set_value(self, value):\n b = ''\n n = 0\n for ch in self._items:\n item = ch._setting_item\n v = value.get(int(item), None)\n if v is not None:\n b += str(item) + ': ('\n to_join = []\n for c in ch._sub_items:\n sub_item = c._setting_sub_item\n try:\n sub_item_value = v[str(sub_item)]\n except KeyError:\n sub_item_value = c._control.get_value()\n c._control.set_value(sub_item_value)\n n += 1\n to_join.append(str(sub_item) + f'={sub_item_value}')\n b += ', '.join(to_join) + ') '\n lbl_text = ngettext('%d value', '%d values', n) % n\n self._button.set_label(lbl_text)\n self._button.set_tooltip_text(b)\n\n\nclass PackedRangeControl(MultipleRangeControl):\n def setup(self, setting):\n validator = setting._validator\n self._items = []\n for item in range(validator.count):\n h = Gtk.HBox(homogeneous=False, spacing=0)\n lbl = Gtk.Label(str(validator.keys[item]))\n control = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, validator.min_value, validator.max_value, 1)\n control.set_round_digits(0)\n control.set_digits(0)\n control.connect('value-changed', self.changed, validator.keys[item])\n h.pack_start(lbl, False, False, 0)\n h.pack_end(control, True, True, 0)\n h._setting_item = validator.keys[item]\n h.control = control\n lbl.set_alignment(0.0, 0.5)\n lbl.set_margin_left(30)\n self.add(h)\n self._items.append(h)\n\n def changed(self, control, item):\n if control.get_sensitive():\n if hasattr(control, '_timer'):\n control._timer.cancel()\n control._timer = _Timer(0.5, lambda: GLib.idle_add(self._write, control, item))\n control._timer.start()\n\n def _write(self, control, item):\n control._timer.cancel()\n delattr(control, '_timer')\n new_state = int(control.get_value())\n if self.sbox.setting._value[int(item)] != new_state:\n self.sbox.setting._value[int(item)] = new_state\n _write_async(self.sbox.setting, self.sbox.setting._value[int(item)], self.sbox, key=int(item))\n\n def set_value(self, value):\n b = ''\n n = len(self._items)\n for h in self._items:\n item = h._setting_item\n v = value.get(int(item), None)\n if v is not None:\n h.control.set_value(v)\n else:\n v = self.sbox.setting._value[int(item)]\n b += str(item) + ': (' + str(v) + ') '\n lbl_text = ngettext('%d value', '%d values', n) % n\n self._button.set_label(lbl_text)\n self._button.set_tooltip_text(b)\n\n\n#\n#\n#\n\n_allowables_icons = {True: 'changes-allow', False: 'changes-prevent', _SENSITIVITY_IGNORE: 'dialog-error'}\n_allowables_tooltips = {\n True: _('Changes allowed'),\n False: _('No changes allowed'),\n _SENSITIVITY_IGNORE: _('Ignore this setting')\n}\n_next_allowable = {True: False, False: _SENSITIVITY_IGNORE, _SENSITIVITY_IGNORE: True}\n_icons_allowables = {v: k for k, v in _allowables_icons.items()}\n\n\n# clicking on the lock icon changes from changeable to unchangeable to ignore\ndef _change_click(button, sbox):\n icon = button.get_children()[0]\n icon_name, _ = icon.get_icon_name()\n allowed = _icons_allowables.get(icon_name, True)\n new_allowed = _next_allowable[allowed]\n sbox._control.set_sensitive(new_allowed is True)\n _change_icon(new_allowed, icon)\n if sbox.setting._device.persister: # remember the new setting sensitivity\n sbox.setting._device.persister.set_sensitivity(sbox.setting.name, new_allowed)\n if allowed == _SENSITIVITY_IGNORE: # update setting if it was being ignored\n setting = next((s for s in sbox.setting._device.settings if s.name == sbox.setting.name), None)\n if setting:\n persisted = 
sbox.setting._device.persister.get(setting.name) if sbox.setting._device.persister else None\n if setting.persist and persisted is not None:\n _write_async(setting, persisted, sbox)\n else:\n _read_async(setting, True, sbox, bool(sbox.setting._device.online), sbox._control.get_sensitive())\n return True\n\n\ndef _change_icon(allowed, icon):\n if allowed in _allowables_icons:\n icon._allowed = allowed\n icon.set_from_icon_name(_allowables_icons[allowed], Gtk.IconSize.LARGE_TOOLBAR)\n icon.set_tooltip_text(_allowables_tooltips[allowed])\n\n\ndef _create_sbox(s, device):\n sbox = Gtk.HBox(homogeneous=False, spacing=6)\n sbox.setting = s\n sbox.kind = s.kind\n if s.description:\n sbox.set_tooltip_text(s.description)\n lbl = Gtk.Label(s.label)\n lbl.set_alignment(0.0, 0.5)\n label = Gtk.EventBox()\n label.add(lbl)\n spinner = Gtk.Spinner()\n spinner.set_tooltip_text(_('Working') + '...')\n sbox._spinner = spinner\n failed = Gtk.Image.new_from_icon_name('dialog-warning', Gtk.IconSize.SMALL_TOOLBAR)\n failed.set_tooltip_text(_('Read/write operation failed.'))\n sbox._failed = failed\n change_icon = Gtk.Image.new_from_icon_name('changes-prevent', Gtk.IconSize.LARGE_TOOLBAR)\n sbox._change_icon = change_icon\n _change_icon(False, change_icon)\n change = Gtk.Button()\n change.set_relief(Gtk.ReliefStyle.NONE)\n change.add(change_icon)\n change.set_sensitive(True)\n change.connect('clicked', _change_click, sbox)\n\n if s.kind == _SETTING_KIND.toggle:\n control = ToggleControl(sbox)\n elif s.kind == _SETTING_KIND.range:\n control = SliderControl(sbox)\n elif s.kind == _SETTING_KIND.choice:\n control = _create_choice_control(sbox)\n elif s.kind == _SETTING_KIND.map_choice:\n control = MapChoiceControl(sbox)\n elif s.kind == _SETTING_KIND.multiple_toggle:\n control = MultipleToggleControl(sbox, change)\n elif s.kind == _SETTING_KIND.multiple_range:\n control = MultipleRangeControl(sbox, change)\n elif s.kind == _SETTING_KIND.packed_range:\n control = PackedRangeControl(sbox, change)\n else:\n if _log.isEnabledFor(_WARNING):\n _log.warn('setting %s display not implemented', s.label)\n return None\n\n control.set_sensitive(False) # the first read will enable it\n control.layout(sbox, label, change, spinner, failed)\n sbox._control = control\n sbox.show_all()\n spinner.start() # the first read will stop it\n failed.set_visible(False)\n return sbox\n\n\ndef _update_setting_item(sbox, value, is_online=True, sensitive=True):\n # sbox._spinner.set_visible(False) # don't repack item box\n sbox._spinner.stop()\n if value is None:\n sbox._control.set_sensitive(False)\n _change_icon(False, sbox._change_icon)\n sbox._failed.set_visible(is_online)\n return\n sbox._failed.set_visible(False)\n sbox._control.set_sensitive(False)\n sbox._control.set_value(value)\n sensitive = sbox._change_icon._allowed if sensitive is None else sensitive\n sbox._control.set_sensitive(sensitive is True)\n _change_icon(sensitive, sbox._change_icon)\n\n\ndef _disable_listbox_highlight_bg(lb):\n colour = Gdk.RGBA()\n colour.parse('rgba(0,0,0,0)')\n for child in lb.get_children():\n child.override_background_color(Gtk.StateFlags.PRELIGHT, colour)\n\n\n#\n#\n#\n\n# config panel\n_box = None\n_items = {}\n\n\ndef create():\n global _box\n assert _box is None\n _box = Gtk.VBox(homogeneous=False, spacing=8)\n _box._last_device = None\n\n config_scroll = Gtk.ScrolledWindow()\n config_scroll.add(_box)\n config_scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n config_scroll.set_shadow_type(Gtk.ShadowType.IN)\n 
config_scroll.set_size_request(0, 350) # ask for enough vertical space for about eight settings\n\n return config_scroll\n\n\ndef update(device, is_online=None):\n assert _box is not None\n assert device\n device_id = (device.receiver.path if device.receiver else device.path, device.number)\n if is_online is None:\n is_online = bool(device.online)\n\n # if the device changed since last update, clear the box first\n if device_id != _box._last_device:\n _box.set_visible(False)\n _box._last_device = device_id\n\n # hide controls belonging to other devices\n for k, sbox in _items.items():\n sbox = _items[k]\n sbox.set_visible(k[0:2] == device_id)\n\n for s in device.settings:\n k = (device_id[0], device_id[1], s.name)\n if k in _items:\n sbox = _items[k]\n else:\n sbox = _create_sbox(s, device)\n if sbox is None:\n continue\n _items[k] = sbox\n _box.pack_start(sbox, False, False, 0)\n sensitive = device.persister.get_sensitivity(s.name) if device.persister else True\n _read_async(s, False, sbox, is_online, sensitive)\n\n _box.set_visible(True)\n\n\ndef clean(device):\n \"\"\"Remove the controls for a given device serial.\n Needed after the device has been unpaired.\n \"\"\"\n assert _box is not None\n device_id = (device.receiver.path if device.receiver else device.path, device.number)\n for k in list(_items.keys()):\n if k[0:2] == device_id:\n _box.remove(_items[k])\n del _items[k]\n\n\ndef destroy():\n global _box\n _box = None\n _items.clear()\n\n\ndef change_setting(device, setting, values):\n \"\"\"External interface to change a setting and have the GUI show the change\"\"\"\n assert device == setting._device\n GLib.idle_add(_change_setting, device, setting, values, priority=99)\n\n\ndef _change_setting(device, setting, values):\n device_path = device.receiver.path if device.receiver else device.path\n if (device_path, device.number, setting.name) in _items:\n sbox = _items[(device_path, device.number, setting.name)]\n else:\n sbox = None\n _write_async(setting, values[-1], sbox, None, key=values[0] if len(values) > 1 else None)\n",
"path": "lib/solaar/ui/config_panel.py"
}
] | diff --git a/lib/solaar/ui/config_panel.py b/lib/solaar/ui/config_panel.py
index 9c019af749..b54736712c 100644
--- a/lib/solaar/ui/config_panel.py
+++ b/lib/solaar/ui/config_panel.py
@@ -593,7 +593,7 @@ def _create_sbox(s, device):
def _update_setting_item(sbox, value, is_online=True, sensitive=True):
- sbox._spinner.set_visible(False)
+ # sbox._spinner.set_visible(False) # don't repack item box
sbox._spinner.stop()
if value is None:
sbox._control.set_sensitive(False)
|
huggingface__trl-528 | DPO evaluation error---tensors on two devices
Hi!
Thanks for the awesome codebase. I ran the DPO example `trl/examples/dpo.py` but encountered an error at the evaluation step: `Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!`
Here is a [colab notebook](https://colab.research.google.com/drive/11AVym7U3gkTn_qTfrnSDtA_AO5_zJkkd?usp=sharing) that shows this problem. To expose the problem faster, I set `training_args.eval_steps = 1`.
To solve it, a hotfix is to add `.to(self.accelerator.device)` in a few places in [concatenated_forward](https://github.com/lvwerra/trl/blob/main/trl/trainer/dpo_trainer.py#L288-L296):
```python
all_logits = model(
concatenated_batch["concatenated_input_ids"].to(self.accelerator.device),
attention_mask=concatenated_batch["concatenated_attention_mask"].to(self.accelerator.device),
).logits.to(torch.float32)
all_logps = self._get_batch_logps(
all_logits,
concatenated_batch["concatenated_labels"].to(self.accelerator.device),
average_log_prob=False,
)
```
However, I am not sure why the trainer does not handle the device change automatically. If this hotfix is fine, I can submit a pull request. Otherwise, I'm also happy to learn how to address this problem more generically.
Tianlin
| [
{
"content": "# DPO Authors: Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn 2023\n# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom datasets import Dataset\nfrom transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments\nfrom transformers.trainer_callback import TrainerCallback\n\nfrom ..import_utils import is_peft_available\nfrom .utils import DPODataCollatorWithPadding, pad_to_length\n\n\nif is_peft_available():\n from peft import get_peft_model, prepare_model_for_int8_training\n\n\nclass DPOTrainer(Trainer):\n r\"\"\"\n Initialize DPOTrainer.\n\n Args:\n model (`transformers.PreTrainedModel`):\n The model to train, preferably an `AutoModelForSequenceClassification`.\n ref_model (`PreTrainedModelWrapper`):\n Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss.\n beta (`float`, defaults to 0.1):\n The beta factor in DPO loss. Higher beta means less divergence from the initial policy.\n args (`transformers.TrainingArguments`):\n The arguments to use for training.\n data_collator (`transformers.DataCollator`):\n The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used\n which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.\n label_pad_token_id (`int`, defaults to `-100`):\n The label pad token id. This argument is required if you want to use the default data collator.\n padding_value (`int`, defaults to `0`):\n The padding value. This argument is required if you want to use the default data collator.\n truncation_mode (`str`, defaults to `keep_end`):\n The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.\n train_dataset (`datasets.Dataset`):\n The dataset to use for training.\n eval_dataset (`datasets.Dataset`):\n The dataset to use for evaluation.\n tokenizer (`transformers.PreTrainedTokenizerBase`):\n The tokenizer to use for training. This argument is required if you want to use the default data collator.\n model_init (`Callable[[], transformers.PreTrainedModel]`):\n The model initializer to use for training. 
If None is specified, the default model initializer will be used.\n callbacks (`List[transformers.TrainerCallback]`):\n The callbacks to use for training.\n optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):\n The optimizer and scheduler to use for training.\n preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):\n The function to use to preprocess the logits before computing the metrics.\n max_length (`int`, defaults to `None`):\n The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.\n max_prompt_length (`int`, defaults to `None`):\n The maximum length of the prompt. This argument is required if you want to use the default data collator.\n peft_config (`Dict`, defaults to `None`):\n The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.\n \"\"\"\n\n def __init__(\n self,\n model: Union[PreTrainedModel, nn.Module] = None,\n ref_model: Union[PreTrainedModel, nn.Module] = None,\n beta: float = 0.1,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n label_pad_token_id: int = -100,\n padding_value: int = 0,\n truncation_mode: str = \"keep_end\",\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,\n tokenizer: Optional[PreTrainedTokenizerBase] = None,\n model_init: Optional[Callable[[], PreTrainedModel]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (\n None,\n None,\n ),\n preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,\n max_length: Optional[int] = None,\n max_prompt_length: Optional[int] = None,\n peft_config: Optional[Dict] = None,\n ):\n if not is_peft_available() and peft_config is not None:\n raise ValueError(\n \"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models\"\n )\n elif is_peft_available() and peft_config is not None:\n if getattr(model, \"is_loaded_in_8bit\", False) or getattr(model, \"is_loaded_in_4bit\", False):\n model = prepare_model_for_int8_training(model)\n model = get_peft_model(model, peft_config)\n\n if data_collator is None:\n if tokenizer is None:\n raise ValueError(\n \"max_length or a tokenizer must be specified when using the default DPODataCollatorWithPadding\"\n )\n if max_length is None:\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `max_length` in the DPOTrainer's init\"\n \" it will be set to `512` by default, but you should do it yourself in the future.\",\n UserWarning,\n )\n max_length = 512\n if max_prompt_length is None:\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the DPOTrainer's init\"\n \" it will be set to `128` by default, but you should do it yourself in the future.\",\n UserWarning,\n )\n max_prompt_length = 128\n\n data_collator = DPODataCollatorWithPadding(\n tokenizer,\n max_length=max_length,\n max_prompt_length=max_prompt_length,\n batch_size=args.train_batch_size,\n label_pad_token_id=label_pad_token_id,\n padding_value=padding_value,\n truncation_mode=truncation_mode,\n )\n\n if args.remove_unused_columns:\n args.remove_unused_columns = False\n # warn users\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in 
your TrainingArguments\"\n \" we have set it for you, but you should do it yourself in the future.\",\n UserWarning,\n )\n\n self.use_dpo_data_collator = True\n else:\n self.use_dpo_data_collator = False\n\n self.label_pad_token_id = label_pad_token_id\n self.padding_value = padding_value\n\n self.beta = beta\n self.ref_model = ref_model\n\n super().__init__(\n model,\n args,\n data_collator,\n train_dataset,\n eval_dataset,\n tokenizer,\n model_init,\n None,\n callbacks,\n optimizers,\n preprocess_logits_for_metrics,\n )\n\n # Since we inherit from trainer we always have access to an accelerator\n if hasattr(self, \"accelerator\"):\n self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)\n else:\n raise AttributeError(\n \"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.\"\n )\n\n def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Dict[str, torch.LongTensor]:\n \"\"\"Concatenate the chosen and rejected inputs into a single tensor.\n\n Args:\n batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).\n\n Returns:\n A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.\n \"\"\"\n max_length = max(batch[\"chosen_input_ids\"].shape[1], batch[\"rejected_input_ids\"].shape[1])\n concatenated_batch = {}\n for k in batch:\n if k.startswith(\"chosen\") and isinstance(batch[k], torch.Tensor):\n pad_value = self.label_pad_token_id if \"labels\" in k else self.padding_value\n concatenated_key = k.replace(\"chosen\", \"concatenated\")\n concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)\n for k in batch:\n if k.startswith(\"rejected\") and isinstance(batch[k], torch.Tensor):\n pad_value = self.label_pad_token_id if \"labels\" in k else self.padding_value\n concatenated_key = k.replace(\"rejected\", \"concatenated\")\n concatenated_batch[concatenated_key] = torch.cat(\n (\n concatenated_batch[concatenated_key],\n pad_to_length(batch[k], max_length, pad_value=pad_value),\n ),\n dim=0,\n )\n return concatenated_batch\n\n def dpo_loss(\n self,\n policy_chosen_logps: torch.FloatTensor,\n policy_rejected_logps: torch.FloatTensor,\n reference_chosen_logps: torch.FloatTensor,\n reference_rejected_logps: torch.FloatTensor,\n reference_free: bool = False,\n ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:\n \"\"\"Compute the DPO loss for a batch of policy and reference model log probabilities.\n\n Args:\n policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)\n policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)\n reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)\n reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)\n beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. 
We ignore the reference model as beta -> 0.\n reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.\n\n Returns:\n A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).\n The losses tensor contains the DPO loss for each example in the batch.\n The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.\n \"\"\"\n pi_logratios = policy_chosen_logps - policy_rejected_logps\n ref_logratios = reference_chosen_logps - reference_rejected_logps\n\n if reference_free:\n ref_logratios = 0\n\n logits = pi_logratios - ref_logratios\n\n losses = -F.logsigmoid(self.beta * logits)\n chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach()\n rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()\n\n return losses, chosen_rewards, rejected_rewards\n\n def _get_batch_logps(\n self,\n logits: torch.FloatTensor,\n labels: torch.LongTensor,\n average_log_prob: bool = False,\n ) -> torch.FloatTensor:\n \"\"\"Compute the log probabilities of the given labels under the given logits.\n\n Args:\n logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)\n labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)\n average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.\n\n Returns:\n A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.\n \"\"\"\n if logits.shape[:-1] != labels.shape:\n raise ValueError(\"Logits (batch and sequence length dim) and labels must have the same shape.\")\n\n labels = labels[:, 1:].clone()\n logits = logits[:, :-1, :]\n loss_mask = labels != self.label_pad_token_id\n\n # dummy token; we'll ignore the losses on these tokens later\n labels[labels == self.label_pad_token_id] = 0\n\n per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)\n\n if average_log_prob:\n return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)\n else:\n return (per_token_logps * loss_mask).sum(-1)\n\n def concatenated_forward(\n self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]\n ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:\n \"\"\"Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.\n\n We do this to avoid doing two forward passes, because it's faster for FSDP.\n \"\"\"\n concatenated_batch = self.concatenated_inputs(batch)\n all_logits = model(\n concatenated_batch[\"concatenated_input_ids\"],\n attention_mask=concatenated_batch[\"concatenated_attention_mask\"],\n ).logits.to(torch.float32)\n all_logps = self._get_batch_logps(\n all_logits,\n concatenated_batch[\"concatenated_labels\"],\n average_log_prob=False,\n )\n chosen_logps = all_logps[: batch[\"chosen_input_ids\"].shape[0]]\n rejected_logps = all_logps[batch[\"chosen_input_ids\"].shape[0] :]\n\n chosen_logits = all_logits[: batch[\"chosen_input_ids\"].shape[0]]\n rejected_logits = all_logits[batch[\"chosen_input_ids\"].shape[0] :]\n return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)\n\n def get_batch_metrics(\n self,\n model,\n 
batch: Dict[str, Union[List, torch.LongTensor]],\n train_test: str = \"train\",\n ):\n \"\"\"Compute the DPO loss and other metrics for the given batch of inputs for train or test.\"\"\"\n metrics = {}\n\n (\n policy_chosen_logps,\n policy_rejected_logps,\n policy_chosen_logits,\n policy_rejected_logits,\n ) = self.concatenated_forward(model, batch)\n with torch.no_grad():\n (\n reference_chosen_logps,\n reference_rejected_logps,\n _,\n _,\n ) = self.concatenated_forward(self.ref_model, batch)\n\n losses, chosen_rewards, rejected_rewards = self.dpo_loss(\n policy_chosen_logps,\n policy_rejected_logps,\n reference_chosen_logps,\n reference_rejected_logps,\n )\n reward_accuracies = (chosen_rewards > rejected_rewards).float()\n\n metrics[f\"rewards_{train_test}/chosen\"] = chosen_rewards.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/rejected\"] = rejected_rewards.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/accuracies\"] = reward_accuracies.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/margins\"] = (chosen_rewards - rejected_rewards).cpu().numpy().mean()\n metrics[f\"logps_{train_test}/rejected\"] = policy_rejected_logps.detach().cpu().numpy().mean()\n metrics[f\"logps_{train_test}/chosen\"] = policy_chosen_logps.detach().cpu().numpy().mean()\n\n metrics[f\"logits_{train_test}/rejected\"] = policy_rejected_logits.detach().cpu().numpy().mean()\n metrics[f\"logits_{train_test}/chosen\"] = policy_chosen_logits.detach().cpu().numpy().mean()\n\n metrics[f\"loss/{train_test}\"] = losses.detach().cpu().numpy().mean()\n\n return losses.mean(), metrics\n\n def compute_loss(\n self,\n model: Union[PreTrainedModel, nn.Module],\n inputs: Dict[str, Union[torch.Tensor, Any]],\n return_outputs=False,\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:\n if not self.use_dpo_data_collator:\n raise NotImplementedError(\n \"compute_loss is only implemented for DPODataCollatorWithPadding, please implement your own compute_loss method if you are using a custom data collator\"\n )\n loss, metrics = self.get_batch_metrics(model, inputs, train_test=\"train\")\n\n # force log the metrics\n if self.accelerator.is_main_process:\n self.log_metrics(\"train\", metrics)\n\n if return_outputs:\n return (loss, metrics)\n return loss\n\n def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]:\n \"\"\"Generate samples from the model and reference model for the given batch of inputs.\"\"\"\n\n policy_output = model.generate(\n batch[\"prompt_input_ids\"],\n attention_mask=batch[\"prompt_attention_mask\"],\n max_length=self.config.max_length,\n do_sample=True,\n pad_token_id=self.tokenizer.pad_token_id,\n )\n\n reference_output = self.ref_model.generate(\n batch[\"prompt_input_ids\"],\n attention_mask=batch[\"prompt_attention_mask\"],\n max_length=self.config.max_length,\n do_sample=True,\n pad_token_id=self.tokenizer.pad_token_id,\n )\n\n policy_output = pad_to_length(policy_output, self.config.max_length, self.tokenizer.pad_token_id)\n policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)\n\n reference_output = pad_to_length(reference_output, self.config.max_length, self.tokenizer.pad_token_id)\n reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True)\n\n return policy_output_decoded, reference_output_decoded\n\n def prediction_step(\n self,\n model: Union[PreTrainedModel, nn.Module],\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n 
ignore_keys: Optional[List[str]] = None,\n ):\n if not self.use_dpo_data_collator:\n raise NotImplementedError(\n \"prediction_step is only implemented for DPODataCollatorWithPadding, please implement your own prediction_step method if you are using a custom data collator\"\n )\n if ignore_keys is None:\n if hasattr(model, \"config\"):\n ignore_keys = getattr(model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n with torch.no_grad():\n loss, metrics = self.get_batch_metrics(model, inputs, train_test=\"test\")\n\n # force log the metrics\n if self.accelerator.is_main_process:\n self.log_metrics(\"test\", metrics)\n\n if prediction_loss_only:\n return (loss.detach(), None, None)\n\n # logits for the chosen and rejected samples from model\n logits_dict = {\n \"logits_test/chosen\": metrics[\"logits_test/chosen\"],\n \"logits_test/rejected\": metrics[\"logits_test/rejected\"],\n }\n logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys)\n logits = torch.stack(logits).mean(axis=1)\n labels = torch.zeros(logits.shape[0])\n\n return (loss.detach(), logits, labels)\n",
"path": "trl/trainer/dpo_trainer.py"
}
] | [
{
"content": "# DPO Authors: Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn 2023\n# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom datasets import Dataset\nfrom transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments\nfrom transformers.trainer_callback import TrainerCallback\n\nfrom ..import_utils import is_peft_available\nfrom .utils import DPODataCollatorWithPadding, pad_to_length\n\n\nif is_peft_available():\n from peft import get_peft_model, prepare_model_for_int8_training\n\n\nclass DPOTrainer(Trainer):\n r\"\"\"\n Initialize DPOTrainer.\n\n Args:\n model (`transformers.PreTrainedModel`):\n The model to train, preferably an `AutoModelForSequenceClassification`.\n ref_model (`PreTrainedModelWrapper`):\n Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss.\n beta (`float`, defaults to 0.1):\n The beta factor in DPO loss. Higher beta means less divergence from the initial policy.\n args (`transformers.TrainingArguments`):\n The arguments to use for training.\n data_collator (`transformers.DataCollator`):\n The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used\n which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.\n label_pad_token_id (`int`, defaults to `-100`):\n The label pad token id. This argument is required if you want to use the default data collator.\n padding_value (`int`, defaults to `0`):\n The padding value. This argument is required if you want to use the default data collator.\n truncation_mode (`str`, defaults to `keep_end`):\n The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.\n train_dataset (`datasets.Dataset`):\n The dataset to use for training.\n eval_dataset (`datasets.Dataset`):\n The dataset to use for evaluation.\n tokenizer (`transformers.PreTrainedTokenizerBase`):\n The tokenizer to use for training. This argument is required if you want to use the default data collator.\n model_init (`Callable[[], transformers.PreTrainedModel]`):\n The model initializer to use for training. 
If None is specified, the default model initializer will be used.\n callbacks (`List[transformers.TrainerCallback]`):\n The callbacks to use for training.\n optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):\n The optimizer and scheduler to use for training.\n preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):\n The function to use to preprocess the logits before computing the metrics.\n max_length (`int`, defaults to `None`):\n The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.\n max_prompt_length (`int`, defaults to `None`):\n The maximum length of the prompt. This argument is required if you want to use the default data collator.\n peft_config (`Dict`, defaults to `None`):\n The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.\n \"\"\"\n\n def __init__(\n self,\n model: Union[PreTrainedModel, nn.Module] = None,\n ref_model: Union[PreTrainedModel, nn.Module] = None,\n beta: float = 0.1,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n label_pad_token_id: int = -100,\n padding_value: int = 0,\n truncation_mode: str = \"keep_end\",\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,\n tokenizer: Optional[PreTrainedTokenizerBase] = None,\n model_init: Optional[Callable[[], PreTrainedModel]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (\n None,\n None,\n ),\n preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,\n max_length: Optional[int] = None,\n max_prompt_length: Optional[int] = None,\n peft_config: Optional[Dict] = None,\n ):\n if not is_peft_available() and peft_config is not None:\n raise ValueError(\n \"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models\"\n )\n elif is_peft_available() and peft_config is not None:\n if getattr(model, \"is_loaded_in_8bit\", False) or getattr(model, \"is_loaded_in_4bit\", False):\n model = prepare_model_for_int8_training(model)\n model = get_peft_model(model, peft_config)\n\n if data_collator is None:\n if tokenizer is None:\n raise ValueError(\n \"max_length or a tokenizer must be specified when using the default DPODataCollatorWithPadding\"\n )\n if max_length is None:\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `max_length` in the DPOTrainer's init\"\n \" it will be set to `512` by default, but you should do it yourself in the future.\",\n UserWarning,\n )\n max_length = 512\n if max_prompt_length is None:\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the DPOTrainer's init\"\n \" it will be set to `128` by default, but you should do it yourself in the future.\",\n UserWarning,\n )\n max_prompt_length = 128\n\n data_collator = DPODataCollatorWithPadding(\n tokenizer,\n max_length=max_length,\n max_prompt_length=max_prompt_length,\n batch_size=args.train_batch_size,\n label_pad_token_id=label_pad_token_id,\n padding_value=padding_value,\n truncation_mode=truncation_mode,\n )\n\n if args.remove_unused_columns:\n args.remove_unused_columns = False\n # warn users\n warnings.warn(\n \"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in 
your TrainingArguments\"\n \" we have set it for you, but you should do it yourself in the future.\",\n UserWarning,\n )\n\n self.use_dpo_data_collator = True\n else:\n self.use_dpo_data_collator = False\n\n self.label_pad_token_id = label_pad_token_id\n self.padding_value = padding_value\n\n self.beta = beta\n self.ref_model = ref_model\n\n super().__init__(\n model,\n args,\n data_collator,\n train_dataset,\n eval_dataset,\n tokenizer,\n model_init,\n None,\n callbacks,\n optimizers,\n preprocess_logits_for_metrics,\n )\n\n # Since we inherit from trainer we always have access to an accelerator\n if hasattr(self, \"accelerator\"):\n self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)\n else:\n raise AttributeError(\n \"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.\"\n )\n\n def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Dict[str, torch.LongTensor]:\n \"\"\"Concatenate the chosen and rejected inputs into a single tensor.\n\n Args:\n batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).\n\n Returns:\n A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.\n \"\"\"\n max_length = max(batch[\"chosen_input_ids\"].shape[1], batch[\"rejected_input_ids\"].shape[1])\n concatenated_batch = {}\n for k in batch:\n if k.startswith(\"chosen\") and isinstance(batch[k], torch.Tensor):\n pad_value = self.label_pad_token_id if \"labels\" in k else self.padding_value\n concatenated_key = k.replace(\"chosen\", \"concatenated\")\n concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)\n for k in batch:\n if k.startswith(\"rejected\") and isinstance(batch[k], torch.Tensor):\n pad_value = self.label_pad_token_id if \"labels\" in k else self.padding_value\n concatenated_key = k.replace(\"rejected\", \"concatenated\")\n concatenated_batch[concatenated_key] = torch.cat(\n (\n concatenated_batch[concatenated_key],\n pad_to_length(batch[k], max_length, pad_value=pad_value),\n ),\n dim=0,\n ).to(self.accelerator.device)\n return concatenated_batch\n\n def dpo_loss(\n self,\n policy_chosen_logps: torch.FloatTensor,\n policy_rejected_logps: torch.FloatTensor,\n reference_chosen_logps: torch.FloatTensor,\n reference_rejected_logps: torch.FloatTensor,\n reference_free: bool = False,\n ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:\n \"\"\"Compute the DPO loss for a batch of policy and reference model log probabilities.\n\n Args:\n policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)\n policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)\n reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)\n reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)\n beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. 
We ignore the reference model as beta -> 0.\n reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.\n\n Returns:\n A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).\n The losses tensor contains the DPO loss for each example in the batch.\n The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.\n \"\"\"\n pi_logratios = policy_chosen_logps - policy_rejected_logps\n ref_logratios = reference_chosen_logps - reference_rejected_logps\n\n if reference_free:\n ref_logratios = 0\n\n logits = pi_logratios - ref_logratios\n\n losses = -F.logsigmoid(self.beta * logits)\n chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach()\n rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()\n\n return losses, chosen_rewards, rejected_rewards\n\n def _get_batch_logps(\n self,\n logits: torch.FloatTensor,\n labels: torch.LongTensor,\n average_log_prob: bool = False,\n ) -> torch.FloatTensor:\n \"\"\"Compute the log probabilities of the given labels under the given logits.\n\n Args:\n logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)\n labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)\n average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.\n\n Returns:\n A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.\n \"\"\"\n if logits.shape[:-1] != labels.shape:\n raise ValueError(\"Logits (batch and sequence length dim) and labels must have the same shape.\")\n\n labels = labels[:, 1:].clone()\n logits = logits[:, :-1, :]\n loss_mask = labels != self.label_pad_token_id\n\n # dummy token; we'll ignore the losses on these tokens later\n labels[labels == self.label_pad_token_id] = 0\n\n per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)\n\n if average_log_prob:\n return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)\n else:\n return (per_token_logps * loss_mask).sum(-1)\n\n def concatenated_forward(\n self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]\n ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:\n \"\"\"Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.\n\n We do this to avoid doing two forward passes, because it's faster for FSDP.\n \"\"\"\n concatenated_batch = self.concatenated_inputs(batch)\n all_logits = model(\n concatenated_batch[\"concatenated_input_ids\"],\n attention_mask=concatenated_batch[\"concatenated_attention_mask\"],\n ).logits.to(torch.float32)\n all_logps = self._get_batch_logps(\n all_logits,\n concatenated_batch[\"concatenated_labels\"],\n average_log_prob=False,\n )\n chosen_logps = all_logps[: batch[\"chosen_input_ids\"].shape[0]]\n rejected_logps = all_logps[batch[\"chosen_input_ids\"].shape[0] :]\n\n chosen_logits = all_logits[: batch[\"chosen_input_ids\"].shape[0]]\n rejected_logits = all_logits[batch[\"chosen_input_ids\"].shape[0] :]\n return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)\n\n def get_batch_metrics(\n self,\n model,\n 
batch: Dict[str, Union[List, torch.LongTensor]],\n train_test: str = \"train\",\n ):\n \"\"\"Compute the DPO loss and other metrics for the given batch of inputs for train or test.\"\"\"\n metrics = {}\n\n (\n policy_chosen_logps,\n policy_rejected_logps,\n policy_chosen_logits,\n policy_rejected_logits,\n ) = self.concatenated_forward(model, batch)\n with torch.no_grad():\n (\n reference_chosen_logps,\n reference_rejected_logps,\n _,\n _,\n ) = self.concatenated_forward(self.ref_model, batch)\n\n losses, chosen_rewards, rejected_rewards = self.dpo_loss(\n policy_chosen_logps,\n policy_rejected_logps,\n reference_chosen_logps,\n reference_rejected_logps,\n )\n reward_accuracies = (chosen_rewards > rejected_rewards).float()\n\n metrics[f\"rewards_{train_test}/chosen\"] = chosen_rewards.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/rejected\"] = rejected_rewards.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/accuracies\"] = reward_accuracies.cpu().numpy().mean()\n metrics[f\"rewards_{train_test}/margins\"] = (chosen_rewards - rejected_rewards).cpu().numpy().mean()\n metrics[f\"logps_{train_test}/rejected\"] = policy_rejected_logps.detach().cpu().numpy().mean()\n metrics[f\"logps_{train_test}/chosen\"] = policy_chosen_logps.detach().cpu().numpy().mean()\n\n metrics[f\"logits_{train_test}/rejected\"] = policy_rejected_logits.detach().cpu().numpy().mean()\n metrics[f\"logits_{train_test}/chosen\"] = policy_chosen_logits.detach().cpu().numpy().mean()\n\n metrics[f\"loss/{train_test}\"] = losses.detach().cpu().numpy().mean()\n\n return losses.mean(), metrics\n\n def compute_loss(\n self,\n model: Union[PreTrainedModel, nn.Module],\n inputs: Dict[str, Union[torch.Tensor, Any]],\n return_outputs=False,\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:\n if not self.use_dpo_data_collator:\n raise NotImplementedError(\n \"compute_loss is only implemented for DPODataCollatorWithPadding, please implement your own compute_loss method if you are using a custom data collator\"\n )\n loss, metrics = self.get_batch_metrics(model, inputs, train_test=\"train\")\n\n # force log the metrics\n if self.accelerator.is_main_process:\n self.log_metrics(\"train\", metrics)\n\n if return_outputs:\n return (loss, metrics)\n return loss\n\n def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]:\n \"\"\"Generate samples from the model and reference model for the given batch of inputs.\"\"\"\n\n policy_output = model.generate(\n batch[\"prompt_input_ids\"],\n attention_mask=batch[\"prompt_attention_mask\"],\n max_length=self.config.max_length,\n do_sample=True,\n pad_token_id=self.tokenizer.pad_token_id,\n )\n\n reference_output = self.ref_model.generate(\n batch[\"prompt_input_ids\"],\n attention_mask=batch[\"prompt_attention_mask\"],\n max_length=self.config.max_length,\n do_sample=True,\n pad_token_id=self.tokenizer.pad_token_id,\n )\n\n policy_output = pad_to_length(policy_output, self.config.max_length, self.tokenizer.pad_token_id)\n policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)\n\n reference_output = pad_to_length(reference_output, self.config.max_length, self.tokenizer.pad_token_id)\n reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True)\n\n return policy_output_decoded, reference_output_decoded\n\n def prediction_step(\n self,\n model: Union[PreTrainedModel, nn.Module],\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n 
ignore_keys: Optional[List[str]] = None,\n ):\n if not self.use_dpo_data_collator:\n raise NotImplementedError(\n \"prediction_step is only implemented for DPODataCollatorWithPadding, please implement your own prediction_step method if you are using a custom data collator\"\n )\n if ignore_keys is None:\n if hasattr(model, \"config\"):\n ignore_keys = getattr(model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n with torch.no_grad():\n loss, metrics = self.get_batch_metrics(model, inputs, train_test=\"test\")\n\n # force log the metrics\n if self.accelerator.is_main_process:\n self.log_metrics(\"test\", metrics)\n\n if prediction_loss_only:\n return (loss.detach(), None, None)\n\n # logits for the chosen and rejected samples from model\n logits_dict = {\n \"logits_test/chosen\": metrics[\"logits_test/chosen\"],\n \"logits_test/rejected\": metrics[\"logits_test/rejected\"],\n }\n logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys)\n logits = torch.stack(logits).mean(axis=1)\n labels = torch.zeros(logits.shape[0])\n\n return (loss.detach(), logits, labels)\n",
"path": "trl/trainer/dpo_trainer.py"
}
] | diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py
index 483c9f3cd5..6218fc7512 100644
--- a/trl/trainer/dpo_trainer.py
+++ b/trl/trainer/dpo_trainer.py
@@ -204,7 +204,7 @@ def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -
pad_to_length(batch[k], max_length, pad_value=pad_value),
),
dim=0,
- )
+ ).to(self.accelerator.device)
return concatenated_batch
def dpo_loss(
|
python-poetry__poetry-794 | Support customizable POETRY_HOME
It would be nice to define where poetry gets installed (via get-poetry.py).
By reading the docstring I had assumed it would work in $POETRY_HOME, but that was quickly disproven.
Ideally this could be defined via an environment variable (POETRY_HOME) or via a flag to get-poetry.py.
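A minimal sketch of what honoring such a variable could look like near the top of `get-poetry.py`, assuming the installer keeps its existing `HOME`/`POETRY_HOME` constants and simply lets an environment variable override the default location:

```python
import os

HOME = os.path.expanduser("~")

# Honor an explicit POETRY_HOME if the user exported one,
# otherwise fall back to the current ~/.poetry default.
POETRY_HOME = os.environ.get("POETRY_HOME") or os.path.join(HOME, ".poetry")
POETRY_BIN = os.path.join(POETRY_HOME, "bin")
POETRY_LIB = os.path.join(POETRY_HOME, "lib")
```

An invocation would then look like `POETRY_HOME=/opt/poetry python get-poetry.py` (the path is only an example), with the unset case behaving exactly as today.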
| [
{
"content": "\"\"\"\nThis script will install poetry and its dependencies\nin isolation from the rest of the system.\n\nIt does, in order:\n\n - Downloads the latest stable (or pre-release) version of poetry.\n - Downloads all its dependencies in the poetry/_vendor directory.\n - Copies it and all extra files in $POETRY_HOME.\n - Updates the PATH in a system-specific way.\n\nThere will be a `poetry` script that will be installed in $POETRY_HOME/bin\nwhich will act as the poetry command but is slightly different in the sense\nthat it will use the current Python installation.\n\nWhat this means is that one Poetry installation can serve for multiple\nPython versions.\n\"\"\"\nimport argparse\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nfrom contextlib import closing\nfrom contextlib import contextmanager\nfrom functools import cmp_to_key\nfrom gzip import GzipFile\nfrom io import UnsupportedOperation, open\n\ntry:\n from urllib.error import HTTPError\n from urllib.request import Request\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import HTTPError\n from urllib2 import Request\n from urllib2 import urlopen\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ntry:\n try:\n import winreg\n except ImportError:\n import _winreg as winreg\nexcept ImportError:\n winreg = None\n\ntry:\n u = unicode\nexcept NameError:\n u = str\n\nWINDOWS = sys.platform.startswith(\"win\") or (sys.platform == \"cli\" and os.name == \"nt\")\n\n\nFOREGROUND_COLORS = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n}\n\nBACKGROUND_COLORS = {\n \"black\": 40,\n \"red\": 41,\n \"green\": 42,\n \"yellow\": 43,\n \"blue\": 44,\n \"magenta\": 45,\n \"cyan\": 46,\n \"white\": 47,\n}\n\nOPTIONS = {\"bold\": 1, \"underscore\": 4, \"blink\": 5, \"reverse\": 7, \"conceal\": 8}\n\n\ndef style(fg, bg, options):\n codes = []\n\n if fg:\n codes.append(FOREGROUND_COLORS[fg])\n\n if bg:\n codes.append(BACKGROUND_COLORS[bg])\n\n if options:\n if not isinstance(options, (list, tuple)):\n options = [options]\n\n for option in options:\n codes.append(OPTIONS[option])\n\n return \"\\033[{}m\".format(\";\".join(map(str, codes)))\n\n\nSTYLES = {\n \"info\": style(\"green\", None, None),\n \"comment\": style(\"yellow\", None, None),\n \"error\": style(\"red\", None, None),\n \"warning\": style(\"yellow\", None, None),\n}\n\n\ndef is_decorated():\n if platform.system().lower() == \"windows\":\n return (\n os.getenv(\"ANSICON\") is not None\n or \"ON\" == os.getenv(\"ConEmuANSI\")\n or \"xterm\" == os.getenv(\"Term\")\n )\n\n if not hasattr(sys.stdout, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdout.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef is_interactive():\n if not hasattr(sys.stdin, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdin.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef colorize(style, text):\n if not is_decorated():\n return text\n\n return \"{}{}\\033[0m\".format(STYLES[style], text)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef string_to_bool(value):\n value = value.lower()\n\n 
return value in {\"true\", \"1\", \"y\", \"yes\"}\n\n\ndef expanduser(path):\n \"\"\"\n Expand ~ and ~user constructions.\n\n Includes a workaround for http://bugs.python.org/issue14768\n \"\"\"\n expanded = os.path.expanduser(path)\n if path.startswith(\"~/\") and expanded.startswith(\"//\"):\n expanded = expanded[1:]\n\n return expanded\n\n\nHOME = expanduser(\"~\")\nPOETRY_HOME = os.path.join(HOME, \".poetry\")\nPOETRY_BIN = os.path.join(POETRY_HOME, \"bin\")\nPOETRY_ENV = os.path.join(POETRY_HOME, \"env\")\nPOETRY_LIB = os.path.join(POETRY_HOME, \"lib\")\nPOETRY_LIB_BACKUP = os.path.join(POETRY_HOME, \"lib-backup\")\n\n\nBIN = \"\"\"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport glob\nimport sys\nimport os\n\nlib = os.path.normpath(os.path.join(os.path.realpath(__file__), \"../..\", \"lib\"))\n\nsys.path.insert(0, lib)\n\nif __name__ == \"__main__\":\n from poetry.console import main\n\n main()\n\"\"\"\n\nBAT = u('@echo off\\r\\npython \"{poetry_bin}\" %*\\r\\n')\n\n\nPRE_MESSAGE = \"\"\"# Welcome to {poetry}!\n\nThis will download and install the latest version of {poetry},\na dependency and package manager for Python.\n\nIt will add the `poetry` command to {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\n{platform_msg}\n\nYou can uninstall at any time with `poetry self:uninstall`,\nor by executing this script with the --uninstall option,\nand these changes will be reverted.\n\"\"\"\n\nPRE_UNINSTALL_MESSAGE = \"\"\"# We are sorry to see you go!\n\nThis will uninstall {poetry}.\n\nIt will remove the `poetry` command from {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\nThis will also remove {poetry} from your system's PATH.\n\"\"\"\n\n\nPRE_MESSAGE_UNIX = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the profile file{plural} located at:\n\n{rcfiles}\"\"\"\n\n\nPRE_MESSAGE_WINDOWS = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the `HKEY_CURRENT_USER/Environment/PATH` registry key.\"\"\"\n\nPRE_MESSAGE_NO_MODIFY_PATH = \"\"\"This path needs to be in your `PATH` environment variable,\nbut will not be added automatically.\"\"\"\n\nPOST_MESSAGE_UNIX = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Next time you log in this will be done\nautomatically.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Future applications will automatically have the\ncorrect environment, but you may need to restart your current shell.\n\"\"\"\n\nPOST_MESSAGE_UNIX_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. 
This has not been done automatically.\n\"\"\"\n\n\nclass Installer:\n\n CURRENT_PYTHON = sys.executable\n CURRENT_PYTHON_VERSION = sys.version_info[:2]\n METADATA_URL = \"https://pypi.org/pypi/poetry/json\"\n VERSION_REGEX = re.compile(\n r\"v?(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?(?:\\.(\\d+))?\"\n \"(\"\n \"[._-]?\"\n r\"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\\d+)*)?)?\"\n \"([.-]?dev)?\"\n \")?\"\n r\"(?:\\+[^\\s]+)?\"\n )\n\n BASE_URL = \"https://github.com/sdispater/poetry/releases/download/\"\n\n def __init__(\n self,\n version=None,\n preview=False,\n force=False,\n accept_all=False,\n base_url=BASE_URL,\n ):\n self._version = version\n self._preview = preview\n self._force = force\n self._modify_path = True\n self._accept_all = accept_all\n self._base_url = base_url\n\n def allows_prereleases(self):\n return self._preview\n\n def run(self):\n version, current_version = self.get_version()\n\n if version is None:\n return 0\n\n self.customize_install()\n self.display_pre_message()\n self.ensure_home()\n\n try:\n self.install(version, upgrade=current_version is not None)\n except subprocess.CalledProcessError as e:\n print(colorize(\"error\", \"An error has occured: {}\".format(str(e))))\n print(e.output.decode())\n\n return e.returncode\n\n self.display_post_message(version)\n\n return 0\n\n def uninstall(self):\n self.display_pre_uninstall_message()\n\n if not self.customize_uninstall():\n return\n\n self.remove_home()\n self.remove_from_path()\n\n def get_version(self):\n print(colorize(\"info\", \"Retrieving Poetry metadata\"))\n\n metadata = json.loads(self._get(self.METADATA_URL).decode())\n\n def _compare_versions(x, y):\n mx = self.VERSION_REGEX.match(x)\n my = self.VERSION_REGEX.match(y)\n\n vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)\n vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)\n\n if vx < vy:\n return -1\n elif vx > vy:\n return 1\n\n return 0\n\n print(\"\")\n releases = sorted(\n metadata[\"releases\"].keys(), key=cmp_to_key(_compare_versions)\n )\n\n if self._version and self._version not in releases:\n print(colorize(\"error\", \"Version {} does not exist.\".format(self._version)))\n\n return None, None\n\n version = self._version\n if not version:\n for release in reversed(releases):\n m = self.VERSION_REGEX.match(release)\n if m.group(5) and not self.allows_prereleases():\n continue\n\n version = release\n\n break\n\n current_version = None\n if os.path.exists(POETRY_LIB):\n with open(\n os.path.join(POETRY_LIB, \"poetry\", \"__version__.py\"), encoding=\"utf-8\"\n ) as f:\n version_content = f.read()\n\n current_version_re = re.match(\n '(?ms).*__version__ = \"(.+)\".*', version_content\n )\n if not current_version_re:\n print(\n colorize(\n \"warning\",\n \"Unable to get the current Poetry version. Assuming None\",\n )\n )\n else:\n current_version = current_version_re.group(1)\n\n if current_version == version and not self._force:\n print(\"Latest version already installed.\")\n return None, current_version\n\n return version, current_version\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simply press the Enter key to leave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? 
([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def customize_uninstall(self):\n if not self._accept_all:\n print()\n\n uninstall = (\n input(\"Are you sure you want to uninstall Poetry? (y/[n]) \") or \"n\"\n )\n if uninstall.lower() not in {\"y\", \"yes\"}:\n return False\n\n print(\"\")\n\n return True\n\n def ensure_home(self):\n \"\"\"\n Ensures that $POETRY_HOME exists or create it.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n os.mkdir(POETRY_HOME, 0o755)\n\n def remove_home(self):\n \"\"\"\n Removes $POETRY_HOME.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n return\n\n shutil.rmtree(POETRY_HOME)\n\n def install(self, version, upgrade=False):\n \"\"\"\n Installs Poetry in $POETRY_HOME.\n \"\"\"\n print(\"Installing version: \" + colorize(\"info\", version))\n\n self.make_lib(version)\n self.make_bin()\n self.make_env()\n self.update_path()\n\n return 0\n\n def make_lib(self, version):\n \"\"\"\n Packs everything into a single lib/ directory.\n \"\"\"\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n # Backup the current installation\n if os.path.exists(POETRY_LIB):\n shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)\n shutil.rmtree(POETRY_LIB)\n\n try:\n self._make_lib(version)\n except Exception:\n if not os.path.exists(POETRY_LIB_BACKUP):\n raise\n\n shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n raise\n finally:\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n def _make_lib(self, version):\n # We get the payload from the remote host\n platform = sys.platform\n if platform == \"linux2\":\n platform = \"linux\"\n\n url = self._base_url + \"{}/\".format(version)\n name = \"poetry-{}-{}.tar.gz\".format(version, platform)\n checksum = \"poetry-{}-{}.sha256sum\".format(version, platform)\n\n try:\n r = urlopen(url + \"{}\".format(checksum))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(checksum))\n\n raise\n\n checksum = r.read().decode()\n\n try:\n r = urlopen(url + \"{}\".format(name))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(name))\n\n raise\n\n meta = r.info()\n size = int(meta[\"Content-Length\"])\n current = 0\n block_size = 8192\n\n print(\n \" - Downloading {} ({:.2f}MB)\".format(\n colorize(\"comment\", name), size / 1024 / 1024\n )\n )\n\n sha = hashlib.sha256()\n with temporary_directory(prefix=\"poetry-installer-\") as dir_:\n tar = os.path.join(dir_, name)\n with open(tar, \"wb\") as f:\n while True:\n buffer = r.read(block_size)\n if not buffer:\n break\n\n current += len(buffer)\n f.write(buffer)\n sha.update(buffer)\n\n # Checking hashes\n if checksum != sha.hexdigest():\n raise RuntimeError(\n \"Hashes for {} do not match: {} != {}\".format(\n name, checksum, sha.hexdigest()\n )\n )\n\n gz = GzipFile(tar, mode=\"rb\")\n try:\n with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:\n f.extractall(POETRY_LIB)\n finally:\n gz.close()\n\n def make_bin(self):\n if not os.path.exists(POETRY_BIN):\n os.mkdir(POETRY_BIN, 0o755)\n\n if WINDOWS:\n with open(os.path.join(POETRY_BIN, \"poetry.bat\"), \"w\") as f:\n f.write(\n u(\n BAT.format(\n poetry_bin=os.path.join(POETRY_BIN, \"poetry\").replace(\n os.environ[\"USERPROFILE\"], \"%USERPROFILE%\"\n )\n )\n )\n )\n\n with open(os.path.join(POETRY_BIN, \"poetry\"), \"w\", encoding=\"utf-8\") as f:\n f.write(u(BIN))\n\n if not WINDOWS:\n # 
Making the file executable\n st = os.stat(os.path.join(POETRY_BIN, \"poetry\"))\n os.chmod(os.path.join(POETRY_BIN, \"poetry\"), st.st_mode | stat.S_IEXEC)\n\n def make_env(self):\n if WINDOWS:\n return\n\n with open(os.path.join(POETRY_HOME, \"env\"), \"w\") as f:\n f.write(u(self.get_export_string()))\n\n def update_path(self):\n \"\"\"\n Tries to update the $PATH automatically.\n \"\"\"\n if WINDOWS:\n return self.add_to_windows_path()\n\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"\\n{}\\n\".format(export_string)\n\n updated = []\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.read()\n\n if addition not in content:\n with open(profile, \"a\") as f:\n f.write(u(addition))\n\n updated.append(os.path.relpath(profile, HOME))\n\n def add_to_windows_path(self):\n try:\n old_path = self.get_windows_path_var()\n except WindowsError:\n old_path = None\n\n if old_path is None:\n print(\n colorize(\n \"warning\",\n \"Unable to get the PATH value. It will not be updated automatically\",\n )\n )\n self._modify_path = False\n\n return\n\n new_path = POETRY_BIN\n if POETRY_BIN in old_path:\n old_path = old_path.replace(POETRY_BIN + \";\", \"\")\n\n if old_path:\n new_path += \";\"\n new_path += old_path\n\n self.set_windows_path_var(new_path)\n\n def get_windows_path_var(self):\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n path, _ = winreg.QueryValueEx(key, \"PATH\")\n\n return path\n\n def set_windows_path_var(self, value):\n import ctypes\n\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n winreg.SetValueEx(key, \"PATH\", 0, winreg.REG_EXPAND_SZ, value)\n\n # Tell other processes to update their environment\n HWND_BROADCAST = 0xFFFF\n WM_SETTINGCHANGE = 0x1A\n\n SMTO_ABORTIFHUNG = 0x0002\n\n result = ctypes.c_long()\n SendMessageTimeoutW = ctypes.windll.user32.SendMessageTimeoutW\n SendMessageTimeoutW(\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n 0,\n u\"Environment\",\n SMTO_ABORTIFHUNG,\n 5000,\n ctypes.byref(result),\n )\n\n def remove_from_path(self):\n if WINDOWS:\n return self.remove_from_windows_path()\n\n return self.remove_from_unix_path()\n\n def remove_from_windows_path(self):\n path = self.get_windows_path_var()\n\n poetry_path = POETRY_BIN\n if poetry_path in path:\n path = path.replace(POETRY_BIN + \";\", \"\")\n\n if poetry_path in path:\n path = path.replace(POETRY_BIN, \"\")\n\n self.set_windows_path_var(path)\n\n def remove_from_unix_path(self):\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"{}\\n\".format(export_string)\n\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.readlines()\n\n if addition not in content:\n continue\n\n new_content = []\n for line in content:\n if line == addition:\n if new_content and not new_content[-1].strip():\n new_content = new_content[:-1]\n\n continue\n\n new_content.append(line)\n\n with open(profile, \"w\") as f:\n f.writelines(new_content)\n\n def get_export_string(self):\n path = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n export_string = 'export PATH=\"{}:$PATH\"'.format(path)\n\n return 
export_string\n\n def get_unix_profiles(self):\n profiles = [os.path.join(HOME, \".profile\")]\n\n shell = os.getenv(\"SHELL\", \"\")\n if \"zsh\" in shell:\n zdotdir = os.getenv(\"ZDOTDIR\", HOME)\n profiles.append(os.path.join(zdotdir, \".zprofile\"))\n\n bash_profile = os.path.join(HOME, \".bash_profile\")\n if os.path.exists(bash_profile):\n profiles.append(bash_profile)\n\n return profiles\n\n def display_pre_message(self):\n if WINDOWS:\n home = POETRY_BIN.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home),\n }\n\n if not self._modify_path:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_NO_MODIFY_PATH\n else:\n if WINDOWS:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_WINDOWS\n else:\n profiles = [\n colorize(\"comment\", p.replace(os.getenv(\"HOME\", \"\"), \"$HOME\"))\n for p in self.get_unix_profiles()\n ]\n kwargs[\"platform_msg\"] = PRE_MESSAGE_UNIX.format(\n rcfiles=\"\\n\".join(profiles), plural=\"s\" if len(profiles) > 1 else \"\"\n )\n\n print(PRE_MESSAGE.format(**kwargs))\n\n def display_pre_uninstall_message(self):\n home_bin = POETRY_BIN\n if WINDOWS:\n home_bin = home_bin.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home_bin = home_bin.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home_bin),\n }\n\n print(PRE_UNINSTALL_MESSAGE.format(**kwargs))\n\n def display_post_message(self, version):\n print(\"\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"version\": colorize(\"comment\", version),\n }\n\n if WINDOWS:\n message = POST_MESSAGE_WINDOWS\n if not self._modify_path:\n message = POST_MESSAGE_WINDOWS_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n )\n else:\n message = POST_MESSAGE_UNIX\n if not self._modify_path:\n message = POST_MESSAGE_UNIX_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n kwargs[\"poetry_home_env\"] = colorize(\n \"comment\", POETRY_ENV.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n )\n\n kwargs[\"poetry_home_bin\"] = colorize(\"comment\", poetry_home_bin)\n\n print(message.format(**kwargs))\n\n def call(self, *args):\n return subprocess.check_output(args, stderr=subprocess.STDOUT)\n\n def _get(self, url):\n request = Request(url, headers={\"User-Agent\": \"Python Poetry\"})\n\n with closing(urlopen(request)) as r:\n return r.read()\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Installs the latest (or given) version of poetry\"\n )\n parser.add_argument(\n \"-p\", \"--preview\", dest=\"preview\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--version\", dest=\"version\")\n parser.add_argument(\n \"-f\", \"--force\", dest=\"force\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"-y\", \"--yes\", dest=\"accept_all\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--uninstall\", dest=\"uninstall\", action=\"store_true\", default=False\n )\n\n args = parser.parse_args()\n\n installer = Installer(\n version=args.version or os.getenv(\"POETRY_VERSION\"),\n preview=args.preview or string_to_bool(os.getenv(\"POETRY_PREVIEW\", \"0\")),\n force=args.force,\n accept_all=args.accept_all\n or string_to_bool(os.getenv(\"POETRY_ACCEPT\", 
\"0\"))\n or not is_interactive(),\n )\n\n if args.uninstall or string_to_bool(os.getenv(\"POETRY_UNINSTALL\", \"0\")):\n return installer.uninstall()\n\n return installer.run()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"path": "get-poetry.py"
}
] | [
{
"content": "\"\"\"\nThis script will install poetry and its dependencies\nin isolation from the rest of the system.\n\nIt does, in order:\n\n - Downloads the latest stable (or pre-release) version of poetry.\n - Downloads all its dependencies in the poetry/_vendor directory.\n - Copies it and all extra files in $POETRY_HOME.\n - Updates the PATH in a system-specific way.\n\nThere will be a `poetry` script that will be installed in $POETRY_HOME/bin\nwhich will act as the poetry command but is slightly different in the sense\nthat it will use the current Python installation.\n\nWhat this means is that one Poetry installation can serve for multiple\nPython versions.\n\"\"\"\nimport argparse\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nfrom contextlib import closing\nfrom contextlib import contextmanager\nfrom functools import cmp_to_key\nfrom gzip import GzipFile\nfrom io import UnsupportedOperation, open\n\ntry:\n from urllib.error import HTTPError\n from urllib.request import Request\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import HTTPError\n from urllib2 import Request\n from urllib2 import urlopen\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ntry:\n try:\n import winreg\n except ImportError:\n import _winreg as winreg\nexcept ImportError:\n winreg = None\n\ntry:\n u = unicode\nexcept NameError:\n u = str\n\nWINDOWS = sys.platform.startswith(\"win\") or (sys.platform == \"cli\" and os.name == \"nt\")\n\n\nFOREGROUND_COLORS = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n}\n\nBACKGROUND_COLORS = {\n \"black\": 40,\n \"red\": 41,\n \"green\": 42,\n \"yellow\": 43,\n \"blue\": 44,\n \"magenta\": 45,\n \"cyan\": 46,\n \"white\": 47,\n}\n\nOPTIONS = {\"bold\": 1, \"underscore\": 4, \"blink\": 5, \"reverse\": 7, \"conceal\": 8}\n\n\ndef style(fg, bg, options):\n codes = []\n\n if fg:\n codes.append(FOREGROUND_COLORS[fg])\n\n if bg:\n codes.append(BACKGROUND_COLORS[bg])\n\n if options:\n if not isinstance(options, (list, tuple)):\n options = [options]\n\n for option in options:\n codes.append(OPTIONS[option])\n\n return \"\\033[{}m\".format(\";\".join(map(str, codes)))\n\n\nSTYLES = {\n \"info\": style(\"green\", None, None),\n \"comment\": style(\"yellow\", None, None),\n \"error\": style(\"red\", None, None),\n \"warning\": style(\"yellow\", None, None),\n}\n\n\ndef is_decorated():\n if platform.system().lower() == \"windows\":\n return (\n os.getenv(\"ANSICON\") is not None\n or \"ON\" == os.getenv(\"ConEmuANSI\")\n or \"xterm\" == os.getenv(\"Term\")\n )\n\n if not hasattr(sys.stdout, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdout.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef is_interactive():\n if not hasattr(sys.stdin, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdin.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef colorize(style, text):\n if not is_decorated():\n return text\n\n return \"{}{}\\033[0m\".format(STYLES[style], text)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef string_to_bool(value):\n value = value.lower()\n\n 
return value in {\"true\", \"1\", \"y\", \"yes\"}\n\n\ndef expanduser(path):\n \"\"\"\n Expand ~ and ~user constructions.\n\n Includes a workaround for http://bugs.python.org/issue14768\n \"\"\"\n expanded = os.path.expanduser(path)\n if path.startswith(\"~/\") and expanded.startswith(\"//\"):\n expanded = expanded[1:]\n\n return expanded\n\n\nHOME = expanduser(\"~\")\nPOETRY_HOME = os.environ.get(\"POETRY_HOME\") or os.path.join(HOME, \".poetry\")\nPOETRY_BIN = os.path.join(POETRY_HOME, \"bin\")\nPOETRY_ENV = os.path.join(POETRY_HOME, \"env\")\nPOETRY_LIB = os.path.join(POETRY_HOME, \"lib\")\nPOETRY_LIB_BACKUP = os.path.join(POETRY_HOME, \"lib-backup\")\n\n\nBIN = \"\"\"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport glob\nimport sys\nimport os\n\nlib = os.path.normpath(os.path.join(os.path.realpath(__file__), \"../..\", \"lib\"))\n\nsys.path.insert(0, lib)\n\nif __name__ == \"__main__\":\n from poetry.console import main\n\n main()\n\"\"\"\n\nBAT = u('@echo off\\r\\npython \"{poetry_bin}\" %*\\r\\n')\n\n\nPRE_MESSAGE = \"\"\"# Welcome to {poetry}!\n\nThis will download and install the latest version of {poetry},\na dependency and package manager for Python.\n\nIt will add the `poetry` command to {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\n{platform_msg}\n\nYou can uninstall at any time with `poetry self:uninstall`,\nor by executing this script with the --uninstall option,\nand these changes will be reverted.\n\"\"\"\n\nPRE_UNINSTALL_MESSAGE = \"\"\"# We are sorry to see you go!\n\nThis will uninstall {poetry}.\n\nIt will remove the `poetry` command from {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\nThis will also remove {poetry} from your system's PATH.\n\"\"\"\n\n\nPRE_MESSAGE_UNIX = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the profile file{plural} located at:\n\n{rcfiles}\"\"\"\n\n\nPRE_MESSAGE_WINDOWS = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the `HKEY_CURRENT_USER/Environment/PATH` registry key.\"\"\"\n\nPRE_MESSAGE_NO_MODIFY_PATH = \"\"\"This path needs to be in your `PATH` environment variable,\nbut will not be added automatically.\"\"\"\n\nPOST_MESSAGE_UNIX = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Next time you log in this will be done\nautomatically.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Future applications will automatically have the\ncorrect environment, but you may need to restart your current shell.\n\"\"\"\n\nPOST_MESSAGE_UNIX_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. 
This has not been done automatically.\n\"\"\"\n\n\nclass Installer:\n\n CURRENT_PYTHON = sys.executable\n CURRENT_PYTHON_VERSION = sys.version_info[:2]\n METADATA_URL = \"https://pypi.org/pypi/poetry/json\"\n VERSION_REGEX = re.compile(\n r\"v?(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?(?:\\.(\\d+))?\"\n \"(\"\n \"[._-]?\"\n r\"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\\d+)*)?)?\"\n \"([.-]?dev)?\"\n \")?\"\n r\"(?:\\+[^\\s]+)?\"\n )\n\n BASE_URL = \"https://github.com/sdispater/poetry/releases/download/\"\n\n def __init__(\n self,\n version=None,\n preview=False,\n force=False,\n accept_all=False,\n base_url=BASE_URL,\n ):\n self._version = version\n self._preview = preview\n self._force = force\n self._modify_path = True\n self._accept_all = accept_all\n self._base_url = base_url\n\n def allows_prereleases(self):\n return self._preview\n\n def run(self):\n version, current_version = self.get_version()\n\n if version is None:\n return 0\n\n self.customize_install()\n self.display_pre_message()\n self.ensure_home()\n\n try:\n self.install(version, upgrade=current_version is not None)\n except subprocess.CalledProcessError as e:\n print(colorize(\"error\", \"An error has occured: {}\".format(str(e))))\n print(e.output.decode())\n\n return e.returncode\n\n self.display_post_message(version)\n\n return 0\n\n def uninstall(self):\n self.display_pre_uninstall_message()\n\n if not self.customize_uninstall():\n return\n\n self.remove_home()\n self.remove_from_path()\n\n def get_version(self):\n print(colorize(\"info\", \"Retrieving Poetry metadata\"))\n\n metadata = json.loads(self._get(self.METADATA_URL).decode())\n\n def _compare_versions(x, y):\n mx = self.VERSION_REGEX.match(x)\n my = self.VERSION_REGEX.match(y)\n\n vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)\n vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)\n\n if vx < vy:\n return -1\n elif vx > vy:\n return 1\n\n return 0\n\n print(\"\")\n releases = sorted(\n metadata[\"releases\"].keys(), key=cmp_to_key(_compare_versions)\n )\n\n if self._version and self._version not in releases:\n print(colorize(\"error\", \"Version {} does not exist.\".format(self._version)))\n\n return None, None\n\n version = self._version\n if not version:\n for release in reversed(releases):\n m = self.VERSION_REGEX.match(release)\n if m.group(5) and not self.allows_prereleases():\n continue\n\n version = release\n\n break\n\n current_version = None\n if os.path.exists(POETRY_LIB):\n with open(\n os.path.join(POETRY_LIB, \"poetry\", \"__version__.py\"), encoding=\"utf-8\"\n ) as f:\n version_content = f.read()\n\n current_version_re = re.match(\n '(?ms).*__version__ = \"(.+)\".*', version_content\n )\n if not current_version_re:\n print(\n colorize(\n \"warning\",\n \"Unable to get the current Poetry version. Assuming None\",\n )\n )\n else:\n current_version = current_version_re.group(1)\n\n if current_version == version and not self._force:\n print(\"Latest version already installed.\")\n return None, current_version\n\n return version, current_version\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simply press the Enter key to leave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? 
([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def customize_uninstall(self):\n if not self._accept_all:\n print()\n\n uninstall = (\n input(\"Are you sure you want to uninstall Poetry? (y/[n]) \") or \"n\"\n )\n if uninstall.lower() not in {\"y\", \"yes\"}:\n return False\n\n print(\"\")\n\n return True\n\n def ensure_home(self):\n \"\"\"\n Ensures that $POETRY_HOME exists or create it.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n os.mkdir(POETRY_HOME, 0o755)\n\n def remove_home(self):\n \"\"\"\n Removes $POETRY_HOME.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n return\n\n shutil.rmtree(POETRY_HOME)\n\n def install(self, version, upgrade=False):\n \"\"\"\n Installs Poetry in $POETRY_HOME.\n \"\"\"\n print(\"Installing version: \" + colorize(\"info\", version))\n\n self.make_lib(version)\n self.make_bin()\n self.make_env()\n self.update_path()\n\n return 0\n\n def make_lib(self, version):\n \"\"\"\n Packs everything into a single lib/ directory.\n \"\"\"\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n # Backup the current installation\n if os.path.exists(POETRY_LIB):\n shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)\n shutil.rmtree(POETRY_LIB)\n\n try:\n self._make_lib(version)\n except Exception:\n if not os.path.exists(POETRY_LIB_BACKUP):\n raise\n\n shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n raise\n finally:\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n def _make_lib(self, version):\n # We get the payload from the remote host\n platform = sys.platform\n if platform == \"linux2\":\n platform = \"linux\"\n\n url = self._base_url + \"{}/\".format(version)\n name = \"poetry-{}-{}.tar.gz\".format(version, platform)\n checksum = \"poetry-{}-{}.sha256sum\".format(version, platform)\n\n try:\n r = urlopen(url + \"{}\".format(checksum))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(checksum))\n\n raise\n\n checksum = r.read().decode()\n\n try:\n r = urlopen(url + \"{}\".format(name))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(name))\n\n raise\n\n meta = r.info()\n size = int(meta[\"Content-Length\"])\n current = 0\n block_size = 8192\n\n print(\n \" - Downloading {} ({:.2f}MB)\".format(\n colorize(\"comment\", name), size / 1024 / 1024\n )\n )\n\n sha = hashlib.sha256()\n with temporary_directory(prefix=\"poetry-installer-\") as dir_:\n tar = os.path.join(dir_, name)\n with open(tar, \"wb\") as f:\n while True:\n buffer = r.read(block_size)\n if not buffer:\n break\n\n current += len(buffer)\n f.write(buffer)\n sha.update(buffer)\n\n # Checking hashes\n if checksum != sha.hexdigest():\n raise RuntimeError(\n \"Hashes for {} do not match: {} != {}\".format(\n name, checksum, sha.hexdigest()\n )\n )\n\n gz = GzipFile(tar, mode=\"rb\")\n try:\n with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:\n f.extractall(POETRY_LIB)\n finally:\n gz.close()\n\n def make_bin(self):\n if not os.path.exists(POETRY_BIN):\n os.mkdir(POETRY_BIN, 0o755)\n\n if WINDOWS:\n with open(os.path.join(POETRY_BIN, \"poetry.bat\"), \"w\") as f:\n f.write(\n u(\n BAT.format(\n poetry_bin=os.path.join(POETRY_BIN, \"poetry\").replace(\n os.environ[\"USERPROFILE\"], \"%USERPROFILE%\"\n )\n )\n )\n )\n\n with open(os.path.join(POETRY_BIN, \"poetry\"), \"w\", encoding=\"utf-8\") as f:\n f.write(u(BIN))\n\n if not WINDOWS:\n # 
Making the file executable\n st = os.stat(os.path.join(POETRY_BIN, \"poetry\"))\n os.chmod(os.path.join(POETRY_BIN, \"poetry\"), st.st_mode | stat.S_IEXEC)\n\n def make_env(self):\n if WINDOWS:\n return\n\n with open(os.path.join(POETRY_HOME, \"env\"), \"w\") as f:\n f.write(u(self.get_export_string()))\n\n def update_path(self):\n \"\"\"\n Tries to update the $PATH automatically.\n \"\"\"\n if WINDOWS:\n return self.add_to_windows_path()\n\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"\\n{}\\n\".format(export_string)\n\n updated = []\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.read()\n\n if addition not in content:\n with open(profile, \"a\") as f:\n f.write(u(addition))\n\n updated.append(os.path.relpath(profile, HOME))\n\n def add_to_windows_path(self):\n try:\n old_path = self.get_windows_path_var()\n except WindowsError:\n old_path = None\n\n if old_path is None:\n print(\n colorize(\n \"warning\",\n \"Unable to get the PATH value. It will not be updated automatically\",\n )\n )\n self._modify_path = False\n\n return\n\n new_path = POETRY_BIN\n if POETRY_BIN in old_path:\n old_path = old_path.replace(POETRY_BIN + \";\", \"\")\n\n if old_path:\n new_path += \";\"\n new_path += old_path\n\n self.set_windows_path_var(new_path)\n\n def get_windows_path_var(self):\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n path, _ = winreg.QueryValueEx(key, \"PATH\")\n\n return path\n\n def set_windows_path_var(self, value):\n import ctypes\n\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n winreg.SetValueEx(key, \"PATH\", 0, winreg.REG_EXPAND_SZ, value)\n\n # Tell other processes to update their environment\n HWND_BROADCAST = 0xFFFF\n WM_SETTINGCHANGE = 0x1A\n\n SMTO_ABORTIFHUNG = 0x0002\n\n result = ctypes.c_long()\n SendMessageTimeoutW = ctypes.windll.user32.SendMessageTimeoutW\n SendMessageTimeoutW(\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n 0,\n u\"Environment\",\n SMTO_ABORTIFHUNG,\n 5000,\n ctypes.byref(result),\n )\n\n def remove_from_path(self):\n if WINDOWS:\n return self.remove_from_windows_path()\n\n return self.remove_from_unix_path()\n\n def remove_from_windows_path(self):\n path = self.get_windows_path_var()\n\n poetry_path = POETRY_BIN\n if poetry_path in path:\n path = path.replace(POETRY_BIN + \";\", \"\")\n\n if poetry_path in path:\n path = path.replace(POETRY_BIN, \"\")\n\n self.set_windows_path_var(path)\n\n def remove_from_unix_path(self):\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"{}\\n\".format(export_string)\n\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.readlines()\n\n if addition not in content:\n continue\n\n new_content = []\n for line in content:\n if line == addition:\n if new_content and not new_content[-1].strip():\n new_content = new_content[:-1]\n\n continue\n\n new_content.append(line)\n\n with open(profile, \"w\") as f:\n f.writelines(new_content)\n\n def get_export_string(self):\n path = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n export_string = 'export PATH=\"{}:$PATH\"'.format(path)\n\n return 
export_string\n\n def get_unix_profiles(self):\n profiles = [os.path.join(HOME, \".profile\")]\n\n shell = os.getenv(\"SHELL\", \"\")\n if \"zsh\" in shell:\n zdotdir = os.getenv(\"ZDOTDIR\", HOME)\n profiles.append(os.path.join(zdotdir, \".zprofile\"))\n\n bash_profile = os.path.join(HOME, \".bash_profile\")\n if os.path.exists(bash_profile):\n profiles.append(bash_profile)\n\n return profiles\n\n def display_pre_message(self):\n if WINDOWS:\n home = POETRY_BIN.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home),\n }\n\n if not self._modify_path:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_NO_MODIFY_PATH\n else:\n if WINDOWS:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_WINDOWS\n else:\n profiles = [\n colorize(\"comment\", p.replace(os.getenv(\"HOME\", \"\"), \"$HOME\"))\n for p in self.get_unix_profiles()\n ]\n kwargs[\"platform_msg\"] = PRE_MESSAGE_UNIX.format(\n rcfiles=\"\\n\".join(profiles), plural=\"s\" if len(profiles) > 1 else \"\"\n )\n\n print(PRE_MESSAGE.format(**kwargs))\n\n def display_pre_uninstall_message(self):\n home_bin = POETRY_BIN\n if WINDOWS:\n home_bin = home_bin.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home_bin = home_bin.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home_bin),\n }\n\n print(PRE_UNINSTALL_MESSAGE.format(**kwargs))\n\n def display_post_message(self, version):\n print(\"\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"version\": colorize(\"comment\", version),\n }\n\n if WINDOWS:\n message = POST_MESSAGE_WINDOWS\n if not self._modify_path:\n message = POST_MESSAGE_WINDOWS_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n )\n else:\n message = POST_MESSAGE_UNIX\n if not self._modify_path:\n message = POST_MESSAGE_UNIX_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n kwargs[\"poetry_home_env\"] = colorize(\n \"comment\", POETRY_ENV.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n )\n\n kwargs[\"poetry_home_bin\"] = colorize(\"comment\", poetry_home_bin)\n\n print(message.format(**kwargs))\n\n def call(self, *args):\n return subprocess.check_output(args, stderr=subprocess.STDOUT)\n\n def _get(self, url):\n request = Request(url, headers={\"User-Agent\": \"Python Poetry\"})\n\n with closing(urlopen(request)) as r:\n return r.read()\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Installs the latest (or given) version of poetry\"\n )\n parser.add_argument(\n \"-p\", \"--preview\", dest=\"preview\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--version\", dest=\"version\")\n parser.add_argument(\n \"-f\", \"--force\", dest=\"force\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"-y\", \"--yes\", dest=\"accept_all\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--uninstall\", dest=\"uninstall\", action=\"store_true\", default=False\n )\n\n args = parser.parse_args()\n\n installer = Installer(\n version=args.version or os.getenv(\"POETRY_VERSION\"),\n preview=args.preview or string_to_bool(os.getenv(\"POETRY_PREVIEW\", \"0\")),\n force=args.force,\n accept_all=args.accept_all\n or string_to_bool(os.getenv(\"POETRY_ACCEPT\", 
\"0\"))\n or not is_interactive(),\n )\n\n if args.uninstall or string_to_bool(os.getenv(\"POETRY_UNINSTALL\", \"0\")):\n return installer.uninstall()\n\n return installer.run()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"path": "get-poetry.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e4f35cf2ee..fc28d7e1a93 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@
- Added a `env use` command to control the Python version used by the project.
- Added a `env list` command to list the virtualenvs associated with the current project.
- Added a `env remove` command to delete virtualenvs associated with the current project.
+- Added support for `POETRY_HOME` declaration within `get-poetry.py`.
- Added support for declaring a specific source for dependencies.
- Added support for disabling PyPI and making another repository the default one.
- Added support for declaring private repositories as secondary.
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 2d4d12a0bf6..66fb0ade021 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -46,6 +46,12 @@ python get-poetry.py --uninstall
POETRY_UNINSTALL=1 python get-poetry.py
```
+By default, Poetry is installed into the user's platform-specific home directory. If you wish to change this, you may define the `POETRY_HOME` environment variable:
+
+```bash
+POETRY_HOME=/etc/poetry python get-poetry.py
+```
+
If you want to install prerelease versions, you can do so by passing `--preview` to `get-poetry.py`
or by using the `POETRY_PREVIEW` environment variable:
diff --git a/get-poetry.py b/get-poetry.py
index e369c866d32..e6cbe6db8fb 100644
--- a/get-poetry.py
+++ b/get-poetry.py
@@ -187,7 +187,7 @@ def expanduser(path):
HOME = expanduser("~")
-POETRY_HOME = os.path.join(HOME, ".poetry")
+POETRY_HOME = os.environ.get("POETRY_HOME") or os.path.join(HOME, ".poetry")
POETRY_BIN = os.path.join(POETRY_HOME, "bin")
POETRY_ENV = os.path.join(POETRY_HOME, "env")
POETRY_LIB = os.path.join(POETRY_HOME, "lib")
|
nautobot__nautobot-1199 | JobResult page may fail to list JobLogEntries in chronological order
### Environment
* Python version: 3.6
* Nautobot version: 1.2.1
### Steps to Reproduce
Unsure at this time
### Expected Behavior
JobLogEntries to be listed in chronological order.
### Observed Behavior

Looking at the code, either the `JobLogEntry` class should define a `Meta.ordering` property, or else the `GitRepositoryResultView` and `JobResultView` views should add an `order_by()` to their `JobLogEntry` querysets.
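A rough sketch of either option, using the field and class names mentioned above; the exact base classes and related-field names in Nautobot may differ, so treat this as illustrative rather than the actual patch:

```python
from django.db import models


# Option 1: give the model a default ordering (oldest entries first).
class JobLogEntry(models.Model):
    created = models.DateTimeField(auto_now_add=True)
    # ... remaining fields unchanged ...

    class Meta:
        ordering = ["created"]


# Option 2: order explicitly where the views build their table querysets,
# e.g. in JobResultView / GitRepositoryResultView (related-field name assumed):
# log_entries = JobLogEntry.objects.filter(job_result=result).order_by("created")
```

Either change would return the log rows in chronological order instead of relying on the database's default (undefined) ordering.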
| [
{
"content": "import inspect\n\nimport django_tables2 as tables\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django_tables2.utils import Accessor\nfrom jsonschema.exceptions import ValidationError as JSONSchemaValidationError\n\nfrom nautobot.utilities.tables import (\n BaseTable,\n BooleanColumn,\n ButtonsColumn,\n ChoiceFieldColumn,\n ColorColumn,\n ColoredLabelColumn,\n ContentTypesColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom nautobot.utilities.templatetags.helpers import render_markdown\nfrom .choices import LogLevelChoices\nfrom .jobs import Job\nfrom .models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomLink,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n JobResult,\n JobLogEntry,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n Status,\n Tag,\n TaggedItem,\n Webhook,\n)\nfrom .registry import registry\n\n\nTAGGED_ITEM = \"\"\"\n{% if value.get_absolute_url %}\n <a href=\"{{ value.get_absolute_url }}\">{{ value }}</a>\n{% else %}\n {{ value }}\n{% endif %}\n\"\"\"\n\nGITREPOSITORY_PROVIDES = \"\"\"\n<span class=\"text-nowrap\">\n{% for entry in datasource_contents %}\n<span style=\"display: inline-block\" title=\"{{ entry.name|title }}\"\nclass=\"label label-{% if entry.content_identifier in record.provided_contents %}success{% else %}default{% endif %}\">\n<i class=\"mdi {{ entry.icon }}\"></i></span>\n{% endfor %}\n</span>\n\"\"\"\n\nGITREPOSITORY_BUTTONS = \"\"\"\n<button data-url=\"{% url 'extras:gitrepository_sync' slug=record.slug %}\" type=\"submit\" class=\"btn btn-primary btn-xs sync-repository\" title=\"Sync\" {% if not perms.extras.change_gitrepository %}disabled=\"disabled\"{% endif %}><i class=\"mdi mdi-source-branch-sync\" aria-hidden=\"true\"></i></button>\n\"\"\"\n\nOBJECTCHANGE_OBJECT = \"\"\"\n{% if record.changed_object and record.changed_object.get_absolute_url %}\n <a href=\"{{ record.changed_object.get_absolute_url }}\">{{ record.object_repr }}</a>\n{% else %}\n {{ record.object_repr }}\n{% endif %}\n\"\"\"\n\nOBJECTCHANGE_REQUEST_ID = \"\"\"\n<a href=\"{% url 'extras:objectchange_list' %}?request_id={{ value }}\">{{ value }}</a>\n\"\"\"\n\n# TODO: Webhook content_types in table order_by\nWEBHOOK_CONTENT_TYPES = \"\"\"\n{{ value.all|join:\", \"|truncatewords:15 }}\n\"\"\"\n\nSCHEDULED_JOB_APPROVAL_QUEUE_BUTTONS = \"\"\"\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_dry_run')\"\n title=\"Dry Run\"\n class=\"btn btn-primary btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-play\"></i>\n</button>\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_approve')\"\n title=\"Approve\"\n class=\"btn btn-success btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-check\"></i>\n</button>\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_deny')\"\n title=\"Deny\"\n class=\"btn btn-danger btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-close\"></i>\n</button>\n\"\"\"\n\n\nclass ComputedFieldTable(BaseTable):\n pk = ToggleColumn()\n label = 
tables.Column(linkify=True)\n\n class Meta(BaseTable.Meta):\n model = ComputedField\n fields = (\n \"pk\",\n \"label\",\n \"slug\",\n \"content_type\",\n \"description\",\n \"weight\",\n )\n default_columns = (\n \"pk\",\n \"label\",\n \"slug\",\n \"content_type\",\n \"description\",\n )\n\n\nclass ConfigContextTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n owner = tables.LinkColumn()\n is_active = BooleanColumn(verbose_name=\"Active\")\n\n class Meta(BaseTable.Meta):\n model = ConfigContext\n fields = (\n \"pk\",\n \"name\",\n \"owner\",\n \"weight\",\n \"is_active\",\n \"description\",\n \"regions\",\n \"sites\",\n \"roles\",\n \"platforms\",\n \"cluster_groups\",\n \"clusters\",\n \"tenant_groups\",\n \"tenants\",\n )\n default_columns = (\"pk\", \"name\", \"weight\", \"is_active\", \"description\")\n\n\nclass ConfigContextSchemaTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n owner = tables.LinkColumn()\n actions = ButtonsColumn(ConfigContextSchema, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = ConfigContextSchema\n fields = (\n \"pk\",\n \"name\",\n \"owner\",\n \"description\",\n \"actions\",\n )\n default_columns = (\"pk\", \"name\", \"description\", \"actions\")\n\n\nclass ConfigContextSchemaValidationStateColumn(tables.Column):\n \"\"\"\n Custom column that validates an instance's context data against a config context schema\n \"\"\"\n\n def __init__(self, validator, data_field, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validator = validator\n self.data_field = data_field\n\n def render(self, record):\n data = getattr(record, self.data_field)\n try:\n self.validator.validate(data)\n except JSONSchemaValidationError as e:\n # Return a red x (like a boolean column) and the validation error message\n return format_html(f'<span class=\"text-danger\"><i class=\"mdi mdi-close-thick\"></i>{e.message}</span>')\n\n # Return a green check (like a boolean column)\n return mark_safe('<span class=\"text-success\"><i class=\"mdi mdi-check-bold\"></i></span>')\n\n\nclass CustomFieldTable(BaseTable):\n pk = ToggleColumn()\n # TODO: Replace name column with slug #464\n slug = tables.Column(linkify=True, accessor=\"name\")\n content_types = ContentTypesColumn(truncate_words=15)\n required = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = CustomField\n fields = (\n \"pk\",\n \"slug\",\n \"content_types\",\n \"type\",\n \"label\",\n \"description\",\n \"required\",\n \"default\",\n \"weight\",\n )\n default_columns = (\n \"pk\",\n \"slug\",\n \"content_types\",\n \"type\",\n \"label\",\n \"required\",\n \"weight\",\n )\n\n def render_description(self, record):\n if record.description:\n return mark_safe(render_markdown(record.description))\n return self.default\n\n\nclass CustomLinkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n new_window = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = CustomLink\n fields = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"group_name\",\n \"weight\",\n )\n\n\nclass ExportTemplateTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n owner = tables.LinkColumn()\n\n class Meta(BaseTable.Meta):\n model = ExportTemplate\n fields = (\n \"pk\",\n \"owner\",\n \"content_type\",\n \"name\",\n \"description\",\n \"mime_type\",\n \"file_extension\",\n )\n 
default_columns = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"file_extension\",\n )\n\n\nclass GitRepositoryTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n remote_url = tables.Column(verbose_name=\"Remote URL\")\n secrets_group = tables.Column(linkify=True)\n last_sync_time = tables.DateTimeColumn(\n empty_values=(), format=settings.SHORT_DATETIME_FORMAT, verbose_name=\"Sync Time\"\n )\n\n last_sync_user = tables.Column(empty_values=(), verbose_name=\"Sync By\")\n\n class JobResultColumn(tables.TemplateColumn):\n def render(self, record, table, value, bound_column, **kwargs):\n if record.name in table.context.get(\"job_results\", {}):\n table.context.update({\"result\": table.context[\"job_results\"][record.name]})\n else:\n table.context.update({\"result\": None})\n return super().render(record, table, value, bound_column, **kwargs)\n\n last_sync_status = JobResultColumn(template_name=\"extras/inc/job_label.html\", verbose_name=\"Sync Status\")\n provides = tables.TemplateColumn(GITREPOSITORY_PROVIDES)\n actions = ButtonsColumn(GitRepository, pk_field=\"slug\", prepend_template=GITREPOSITORY_BUTTONS)\n\n class Meta(BaseTable.Meta):\n model = GitRepository\n fields = (\n \"pk\",\n \"name\",\n \"slug\",\n \"remote_url\",\n \"branch\",\n \"secrets_group\",\n \"provides\",\n \"last_sync_time\",\n \"last_sync_user\",\n \"last_sync_status\",\n \"actions\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"remote_url\",\n \"branch\",\n \"provides\",\n \"last_sync_status\",\n \"actions\",\n )\n\n def render_last_sync_time(self, record):\n if record.name in self.context[\"job_results\"]:\n return self.context[\"job_results\"][record.name].completed\n return self.default\n\n def render_last_sync_user(self, record):\n if record.name in self.context[\"job_results\"]:\n user = self.context[\"job_results\"][record.name].user\n return user\n return self.default\n\n\nclass GitRepositoryBulkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n remote_url = tables.Column(verbose_name=\"Remote URL\")\n secrets_group = tables.Column(linkify=True)\n provides = tables.TemplateColumn(GITREPOSITORY_PROVIDES)\n\n class Meta(BaseTable.Meta):\n model = GitRepository\n fields = (\n \"pk\",\n \"name\",\n \"remote_url\",\n \"branch\",\n \"secrets_group\",\n \"provides\",\n )\n\n\nclass GraphQLQueryTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n\n class Meta(BaseTable.Meta):\n model = GraphQLQuery\n fields = (\n \"pk\",\n \"name\",\n \"slug\",\n )\n\n\ndef log_object_link(value, record):\n return record.absolute_url\n\n\ndef log_entry_color_css(record):\n if record.log_level.lower() == \"failure\":\n return \"danger\"\n return record.log_level.lower()\n\n\nclass JobLogEntryTable(BaseTable):\n created = tables.DateTimeColumn(verbose_name=\"Time\", format=settings.SHORT_DATETIME_FORMAT)\n grouping = tables.Column()\n log_level = tables.Column(\n verbose_name=\"Level\",\n attrs={\"td\": {\"class\": \"text-nowrap report-stats\"}},\n )\n log_object = tables.Column(verbose_name=\"Object\", linkify=log_object_link)\n message = tables.Column()\n\n def render_log_level(self, value):\n log_level = value.lower()\n # The css is label-danger for failure items.\n if log_level == \"failure\":\n log_level = \"danger\"\n\n return format_html('<label class=\"label label-{}\">{}</label>', log_level, value)\n\n class Meta(BaseTable.Meta):\n model = JobLogEntry\n fields = (\"created\", \"grouping\", \"log_level\", \"log_object\", \"message\")\n default_columns = 
(\"created\", \"grouping\", \"log_level\", \"log_object\", \"message\")\n row_attrs = {\n \"class\": log_entry_color_css,\n \"data-name\": lambda record: record.log_level,\n }\n attrs = {\n \"class\": \"table table-hover table-headings\",\n \"id\": \"logs\",\n }\n\n\ndef job_creator_link(value, record):\n \"\"\"\n Get a link to the related object, if any, associated with the given JobResult record.\n \"\"\"\n related_object = record.related_object\n if inspect.isclass(related_object) and issubclass(related_object, Job):\n return reverse(\"extras:job\", kwargs={\"class_path\": related_object.class_path})\n elif related_object:\n return related_object.get_absolute_url()\n return None\n\n\nclass JobResultTable(BaseTable):\n pk = ToggleColumn()\n obj_type = tables.Column(verbose_name=\"Object Type\", accessor=\"obj_type.name\")\n related_object = tables.Column(verbose_name=\"Related Object\", linkify=job_creator_link, accessor=\"related_name\")\n name = tables.Column()\n created = tables.DateTimeColumn(linkify=True, format=settings.SHORT_DATETIME_FORMAT)\n status = tables.TemplateColumn(\n template_code=\"{% include 'extras/inc/job_label.html' with result=record %}\",\n )\n summary = tables.Column(\n empty_values=(),\n verbose_name=\"Results\",\n orderable=False,\n attrs={\"td\": {\"class\": \"text-nowrap report-stats\"}},\n )\n\n def render_summary(self, record):\n \"\"\"\n Define custom rendering for the summary column.\n \"\"\"\n log_objects = JobLogEntry.objects.filter(job_result__pk=record.pk)\n success = log_objects.filter(log_level=LogLevelChoices.LOG_SUCCESS).count()\n info = log_objects.filter(log_level=LogLevelChoices.LOG_INFO).count()\n warning = log_objects.filter(log_level=LogLevelChoices.LOG_WARNING).count()\n failure = log_objects.filter(log_level=LogLevelChoices.LOG_FAILURE).count()\n return format_html(\n \"\"\"<label class=\"label label-success\">{}</label>\n <label class=\"label label-info\">{}</label>\n <label class=\"label label-warning\">{}</label>\n <label class=\"label label-danger\">{}</label>\"\"\",\n success,\n info,\n warning,\n failure,\n )\n\n class Meta(BaseTable.Meta):\n model = JobResult\n fields = (\n \"pk\",\n \"created\",\n \"name\",\n \"obj_type\",\n \"related_object\",\n \"duration\",\n \"completed\",\n \"user\",\n \"status\",\n \"logs\",\n )\n default_columns = (\"pk\", \"created\", \"related_object\", \"user\", \"status\", \"logs\")\n\n\n#\n# ScheduledJobs\n#\n\n\nclass ScheduledJobTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n job_class = tables.Column(verbose_name=\"Job\")\n interval = tables.Column(verbose_name=\"Execution Type\")\n start_time = tables.Column(verbose_name=\"First Run\")\n last_run_at = tables.Column(verbose_name=\"Most Recent Run\")\n total_run_count = tables.Column(verbose_name=\"Total Run Count\")\n\n class Meta(BaseTable.Meta):\n model = ScheduledJob\n fields = (\"pk\", \"name\", \"job_class\", \"interval\", \"start_time\", \"last_run_at\")\n\n\nclass ScheduledJobApprovalQueueTable(BaseTable):\n name = tables.LinkColumn(viewname=\"extras:scheduledjob_approval_request_view\", args=[tables.A(\"pk\")])\n job_class = tables.Column(verbose_name=\"Job\")\n interval = tables.Column(verbose_name=\"Execution Type\")\n start_time = tables.Column(verbose_name=\"Requested\")\n user = tables.Column(verbose_name=\"Requestor\")\n actions = tables.TemplateColumn(SCHEDULED_JOB_APPROVAL_QUEUE_BUTTONS)\n\n class Meta(BaseTable.Meta):\n model = ScheduledJob\n fields = (\"name\", \"job_class\", \"interval\", \"user\", 
\"start_time\", \"actions\")\n\n\nclass ObjectChangeTable(BaseTable):\n time = tables.DateTimeColumn(linkify=True, format=settings.SHORT_DATETIME_FORMAT)\n action = ChoiceFieldColumn()\n changed_object_type = tables.Column(verbose_name=\"Type\")\n object_repr = tables.TemplateColumn(template_code=OBJECTCHANGE_OBJECT, verbose_name=\"Object\")\n request_id = tables.TemplateColumn(template_code=OBJECTCHANGE_REQUEST_ID, verbose_name=\"Request ID\")\n\n class Meta(BaseTable.Meta):\n model = ObjectChange\n fields = (\n \"time\",\n \"user_name\",\n \"action\",\n \"changed_object_type\",\n \"object_repr\",\n \"request_id\",\n )\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipTable(BaseTable):\n pk = ToggleColumn()\n actions = ButtonsColumn(Relationship, buttons=(\"edit\", \"delete\"))\n\n class Meta(BaseTable.Meta):\n model = Relationship\n fields = (\n \"pk\",\n \"name\",\n \"description\",\n \"type\",\n \"source_type\",\n \"destination_type\",\n \"actions\",\n )\n\n\nclass RelationshipAssociationTable(BaseTable):\n pk = ToggleColumn()\n actions = ButtonsColumn(RelationshipAssociation, buttons=(\"delete\",))\n\n source_type = tables.Column()\n source = tables.Column(linkify=True, orderable=False)\n\n destination_type = tables.Column()\n destination = tables.Column(linkify=True, orderable=False)\n\n class Meta(BaseTable.Meta):\n model = RelationshipAssociation\n fields = (\"pk\", \"relationship\", \"source_type\", \"source\", \"destination_type\", \"destination\", \"actions\")\n default_columns = (\"pk\", \"relationship\", \"source\", \"destination\", \"actions\")\n\n\n#\n# Secrets\n#\n\n\nclass SecretTable(BaseTable):\n \"\"\"Table for list view of `Secret` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn()\n tags = TagColumn(url_name=\"extras:secret_list\")\n\n class Meta(BaseTable.Meta):\n model = Secret\n fields = (\n \"pk\",\n \"name\",\n \"provider\",\n \"description\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"provider\",\n \"description\",\n \"tags\",\n )\n\n def render_provider(self, value):\n return registry[\"secrets_providers\"][value].name if value in registry[\"secrets_providers\"] else value\n\n\nclass SecretsGroupTable(BaseTable):\n \"\"\"Table for list view of `SecretsGroup` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn()\n\n class Meta(BaseTable.Meta):\n model = SecretsGroup\n fields = (\n \"pk\",\n \"name\",\n \"description\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"description\",\n )\n\n\n#\n# Custom statuses\n#\n\n\nclass StatusTable(BaseTable):\n \"\"\"Table for list view of `Status` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn(viewname=\"extras:status\", args=[Accessor(\"slug\")])\n color = ColorColumn()\n actions = ButtonsColumn(Status, pk_field=\"slug\")\n content_types = ContentTypesColumn(truncate_words=15)\n\n class Meta(BaseTable.Meta):\n model = Status\n fields = [\"pk\", \"name\", \"slug\", \"color\", \"content_types\", \"description\"]\n\n\nclass StatusTableMixin(BaseTable):\n \"\"\"Mixin to add a `status` field to a table.\"\"\"\n\n status = ColoredLabelColumn()\n\n\nclass TagTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn(viewname=\"extras:tag\", args=[Accessor(\"slug\")])\n color = ColorColumn()\n actions = ButtonsColumn(Tag, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = Tag\n fields = (\"pk\", \"name\", \"items\", \"slug\", \"color\", \"description\", \"actions\")\n\n\nclass TaggedItemTable(BaseTable):\n content_object = 
tables.TemplateColumn(template_code=TAGGED_ITEM, orderable=False, verbose_name=\"Object\")\n content_type = tables.Column(verbose_name=\"Type\")\n\n class Meta(BaseTable.Meta):\n model = TaggedItem\n fields = (\"content_object\", \"content_type\")\n\n\nclass WebhookTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n content_types = tables.TemplateColumn(WEBHOOK_CONTENT_TYPES)\n enabled = BooleanColumn()\n type_create = BooleanColumn()\n type_update = BooleanColumn()\n type_delete = BooleanColumn()\n ssl_verification = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = Webhook\n fields = (\n \"pk\",\n \"name\",\n \"content_types\",\n \"payload_url\",\n \"http_content_type\",\n \"http_method\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n \"ssl_verification\",\n \"ca_file_path\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"content_types\",\n \"payload_url\",\n \"http_content_type\",\n \"enabled\",\n )\n",
"path": "nautobot/extras/tables.py"
}
] | [
{
"content": "import inspect\n\nimport django_tables2 as tables\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django_tables2.utils import Accessor\nfrom jsonschema.exceptions import ValidationError as JSONSchemaValidationError\n\nfrom nautobot.utilities.tables import (\n BaseTable,\n BooleanColumn,\n ButtonsColumn,\n ChoiceFieldColumn,\n ColorColumn,\n ColoredLabelColumn,\n ContentTypesColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom nautobot.utilities.templatetags.helpers import render_markdown\nfrom .choices import LogLevelChoices\nfrom .jobs import Job\nfrom .models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomLink,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n JobResult,\n JobLogEntry,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n Status,\n Tag,\n TaggedItem,\n Webhook,\n)\nfrom .registry import registry\n\n\nTAGGED_ITEM = \"\"\"\n{% if value.get_absolute_url %}\n <a href=\"{{ value.get_absolute_url }}\">{{ value }}</a>\n{% else %}\n {{ value }}\n{% endif %}\n\"\"\"\n\nGITREPOSITORY_PROVIDES = \"\"\"\n<span class=\"text-nowrap\">\n{% for entry in datasource_contents %}\n<span style=\"display: inline-block\" title=\"{{ entry.name|title }}\"\nclass=\"label label-{% if entry.content_identifier in record.provided_contents %}success{% else %}default{% endif %}\">\n<i class=\"mdi {{ entry.icon }}\"></i></span>\n{% endfor %}\n</span>\n\"\"\"\n\nGITREPOSITORY_BUTTONS = \"\"\"\n<button data-url=\"{% url 'extras:gitrepository_sync' slug=record.slug %}\" type=\"submit\" class=\"btn btn-primary btn-xs sync-repository\" title=\"Sync\" {% if not perms.extras.change_gitrepository %}disabled=\"disabled\"{% endif %}><i class=\"mdi mdi-source-branch-sync\" aria-hidden=\"true\"></i></button>\n\"\"\"\n\nOBJECTCHANGE_OBJECT = \"\"\"\n{% if record.changed_object and record.changed_object.get_absolute_url %}\n <a href=\"{{ record.changed_object.get_absolute_url }}\">{{ record.object_repr }}</a>\n{% else %}\n {{ record.object_repr }}\n{% endif %}\n\"\"\"\n\nOBJECTCHANGE_REQUEST_ID = \"\"\"\n<a href=\"{% url 'extras:objectchange_list' %}?request_id={{ value }}\">{{ value }}</a>\n\"\"\"\n\n# TODO: Webhook content_types in table order_by\nWEBHOOK_CONTENT_TYPES = \"\"\"\n{{ value.all|join:\", \"|truncatewords:15 }}\n\"\"\"\n\nSCHEDULED_JOB_APPROVAL_QUEUE_BUTTONS = \"\"\"\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_dry_run')\"\n title=\"Dry Run\"\n class=\"btn btn-primary btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-play\"></i>\n</button>\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_approve')\"\n title=\"Approve\"\n class=\"btn btn-success btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-check\"></i>\n</button>\n<button type=\"button\"\n onClick=\"handleDetailPostAction('{% url 'extras:scheduledjob_approval_request_view' scheduled_job=record.pk %}', '_deny')\"\n title=\"Deny\"\n class=\"btn btn-danger btn-xs\"{% if not perms.extras.run_job %} disabled=\"disabled\"{% endif %}>\n <i class=\"mdi mdi-close\"></i>\n</button>\n\"\"\"\n\n\nclass ComputedFieldTable(BaseTable):\n pk = ToggleColumn()\n label = 
tables.Column(linkify=True)\n\n class Meta(BaseTable.Meta):\n model = ComputedField\n fields = (\n \"pk\",\n \"label\",\n \"slug\",\n \"content_type\",\n \"description\",\n \"weight\",\n )\n default_columns = (\n \"pk\",\n \"label\",\n \"slug\",\n \"content_type\",\n \"description\",\n )\n\n\nclass ConfigContextTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n owner = tables.LinkColumn()\n is_active = BooleanColumn(verbose_name=\"Active\")\n\n class Meta(BaseTable.Meta):\n model = ConfigContext\n fields = (\n \"pk\",\n \"name\",\n \"owner\",\n \"weight\",\n \"is_active\",\n \"description\",\n \"regions\",\n \"sites\",\n \"roles\",\n \"platforms\",\n \"cluster_groups\",\n \"clusters\",\n \"tenant_groups\",\n \"tenants\",\n )\n default_columns = (\"pk\", \"name\", \"weight\", \"is_active\", \"description\")\n\n\nclass ConfigContextSchemaTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n owner = tables.LinkColumn()\n actions = ButtonsColumn(ConfigContextSchema, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = ConfigContextSchema\n fields = (\n \"pk\",\n \"name\",\n \"owner\",\n \"description\",\n \"actions\",\n )\n default_columns = (\"pk\", \"name\", \"description\", \"actions\")\n\n\nclass ConfigContextSchemaValidationStateColumn(tables.Column):\n \"\"\"\n Custom column that validates an instance's context data against a config context schema\n \"\"\"\n\n def __init__(self, validator, data_field, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validator = validator\n self.data_field = data_field\n\n def render(self, record):\n data = getattr(record, self.data_field)\n try:\n self.validator.validate(data)\n except JSONSchemaValidationError as e:\n # Return a red x (like a boolean column) and the validation error message\n return format_html(f'<span class=\"text-danger\"><i class=\"mdi mdi-close-thick\"></i>{e.message}</span>')\n\n # Return a green check (like a boolean column)\n return mark_safe('<span class=\"text-success\"><i class=\"mdi mdi-check-bold\"></i></span>')\n\n\nclass CustomFieldTable(BaseTable):\n pk = ToggleColumn()\n # TODO: Replace name column with slug #464\n slug = tables.Column(linkify=True, accessor=\"name\")\n content_types = ContentTypesColumn(truncate_words=15)\n required = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = CustomField\n fields = (\n \"pk\",\n \"slug\",\n \"content_types\",\n \"type\",\n \"label\",\n \"description\",\n \"required\",\n \"default\",\n \"weight\",\n )\n default_columns = (\n \"pk\",\n \"slug\",\n \"content_types\",\n \"type\",\n \"label\",\n \"required\",\n \"weight\",\n )\n\n def render_description(self, record):\n if record.description:\n return mark_safe(render_markdown(record.description))\n return self.default\n\n\nclass CustomLinkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n new_window = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = CustomLink\n fields = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"group_name\",\n \"weight\",\n )\n\n\nclass ExportTemplateTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n owner = tables.LinkColumn()\n\n class Meta(BaseTable.Meta):\n model = ExportTemplate\n fields = (\n \"pk\",\n \"owner\",\n \"content_type\",\n \"name\",\n \"description\",\n \"mime_type\",\n \"file_extension\",\n )\n 
default_columns = (\n \"pk\",\n \"name\",\n \"content_type\",\n \"file_extension\",\n )\n\n\nclass GitRepositoryTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n remote_url = tables.Column(verbose_name=\"Remote URL\")\n secrets_group = tables.Column(linkify=True)\n last_sync_time = tables.DateTimeColumn(\n empty_values=(), format=settings.SHORT_DATETIME_FORMAT, verbose_name=\"Sync Time\"\n )\n\n last_sync_user = tables.Column(empty_values=(), verbose_name=\"Sync By\")\n\n class JobResultColumn(tables.TemplateColumn):\n def render(self, record, table, value, bound_column, **kwargs):\n if record.name in table.context.get(\"job_results\", {}):\n table.context.update({\"result\": table.context[\"job_results\"][record.name]})\n else:\n table.context.update({\"result\": None})\n return super().render(record, table, value, bound_column, **kwargs)\n\n last_sync_status = JobResultColumn(template_name=\"extras/inc/job_label.html\", verbose_name=\"Sync Status\")\n provides = tables.TemplateColumn(GITREPOSITORY_PROVIDES)\n actions = ButtonsColumn(GitRepository, pk_field=\"slug\", prepend_template=GITREPOSITORY_BUTTONS)\n\n class Meta(BaseTable.Meta):\n model = GitRepository\n fields = (\n \"pk\",\n \"name\",\n \"slug\",\n \"remote_url\",\n \"branch\",\n \"secrets_group\",\n \"provides\",\n \"last_sync_time\",\n \"last_sync_user\",\n \"last_sync_status\",\n \"actions\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"remote_url\",\n \"branch\",\n \"provides\",\n \"last_sync_status\",\n \"actions\",\n )\n\n def render_last_sync_time(self, record):\n if record.name in self.context[\"job_results\"]:\n return self.context[\"job_results\"][record.name].completed\n return self.default\n\n def render_last_sync_user(self, record):\n if record.name in self.context[\"job_results\"]:\n user = self.context[\"job_results\"][record.name].user\n return user\n return self.default\n\n\nclass GitRepositoryBulkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n remote_url = tables.Column(verbose_name=\"Remote URL\")\n secrets_group = tables.Column(linkify=True)\n provides = tables.TemplateColumn(GITREPOSITORY_PROVIDES)\n\n class Meta(BaseTable.Meta):\n model = GitRepository\n fields = (\n \"pk\",\n \"name\",\n \"remote_url\",\n \"branch\",\n \"secrets_group\",\n \"provides\",\n )\n\n\nclass GraphQLQueryTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n\n class Meta(BaseTable.Meta):\n model = GraphQLQuery\n fields = (\n \"pk\",\n \"name\",\n \"slug\",\n )\n\n\ndef log_object_link(value, record):\n return record.absolute_url\n\n\ndef log_entry_color_css(record):\n if record.log_level.lower() == \"failure\":\n return \"danger\"\n return record.log_level.lower()\n\n\nclass JobLogEntryTable(BaseTable):\n created = tables.DateTimeColumn(verbose_name=\"Time\", format=\"Y-m-d H:i:s.u\")\n grouping = tables.Column()\n log_level = tables.Column(\n verbose_name=\"Level\",\n attrs={\"td\": {\"class\": \"text-nowrap report-stats\"}},\n )\n log_object = tables.Column(verbose_name=\"Object\", linkify=log_object_link)\n message = tables.Column()\n\n def render_log_level(self, value):\n log_level = value.lower()\n # The css is label-danger for failure items.\n if log_level == \"failure\":\n log_level = \"danger\"\n\n return format_html('<label class=\"label label-{}\">{}</label>', log_level, value)\n\n class Meta(BaseTable.Meta):\n model = JobLogEntry\n fields = (\"created\", \"grouping\", \"log_level\", \"log_object\", \"message\")\n default_columns = (\"created\", 
\"grouping\", \"log_level\", \"log_object\", \"message\")\n row_attrs = {\n \"class\": log_entry_color_css,\n \"data-name\": lambda record: record.log_level,\n }\n attrs = {\n \"class\": \"table table-hover table-headings\",\n \"id\": \"logs\",\n }\n\n\ndef job_creator_link(value, record):\n \"\"\"\n Get a link to the related object, if any, associated with the given JobResult record.\n \"\"\"\n related_object = record.related_object\n if inspect.isclass(related_object) and issubclass(related_object, Job):\n return reverse(\"extras:job\", kwargs={\"class_path\": related_object.class_path})\n elif related_object:\n return related_object.get_absolute_url()\n return None\n\n\nclass JobResultTable(BaseTable):\n pk = ToggleColumn()\n obj_type = tables.Column(verbose_name=\"Object Type\", accessor=\"obj_type.name\")\n related_object = tables.Column(verbose_name=\"Related Object\", linkify=job_creator_link, accessor=\"related_name\")\n name = tables.Column()\n created = tables.DateTimeColumn(linkify=True, format=settings.SHORT_DATETIME_FORMAT)\n status = tables.TemplateColumn(\n template_code=\"{% include 'extras/inc/job_label.html' with result=record %}\",\n )\n summary = tables.Column(\n empty_values=(),\n verbose_name=\"Results\",\n orderable=False,\n attrs={\"td\": {\"class\": \"text-nowrap report-stats\"}},\n )\n\n def render_summary(self, record):\n \"\"\"\n Define custom rendering for the summary column.\n \"\"\"\n log_objects = JobLogEntry.objects.filter(job_result__pk=record.pk)\n success = log_objects.filter(log_level=LogLevelChoices.LOG_SUCCESS).count()\n info = log_objects.filter(log_level=LogLevelChoices.LOG_INFO).count()\n warning = log_objects.filter(log_level=LogLevelChoices.LOG_WARNING).count()\n failure = log_objects.filter(log_level=LogLevelChoices.LOG_FAILURE).count()\n return format_html(\n \"\"\"<label class=\"label label-success\">{}</label>\n <label class=\"label label-info\">{}</label>\n <label class=\"label label-warning\">{}</label>\n <label class=\"label label-danger\">{}</label>\"\"\",\n success,\n info,\n warning,\n failure,\n )\n\n class Meta(BaseTable.Meta):\n model = JobResult\n fields = (\n \"pk\",\n \"created\",\n \"name\",\n \"obj_type\",\n \"related_object\",\n \"duration\",\n \"completed\",\n \"user\",\n \"status\",\n \"logs\",\n )\n default_columns = (\"pk\", \"created\", \"related_object\", \"user\", \"status\", \"logs\")\n\n\n#\n# ScheduledJobs\n#\n\n\nclass ScheduledJobTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n job_class = tables.Column(verbose_name=\"Job\")\n interval = tables.Column(verbose_name=\"Execution Type\")\n start_time = tables.Column(verbose_name=\"First Run\")\n last_run_at = tables.Column(verbose_name=\"Most Recent Run\")\n total_run_count = tables.Column(verbose_name=\"Total Run Count\")\n\n class Meta(BaseTable.Meta):\n model = ScheduledJob\n fields = (\"pk\", \"name\", \"job_class\", \"interval\", \"start_time\", \"last_run_at\")\n\n\nclass ScheduledJobApprovalQueueTable(BaseTable):\n name = tables.LinkColumn(viewname=\"extras:scheduledjob_approval_request_view\", args=[tables.A(\"pk\")])\n job_class = tables.Column(verbose_name=\"Job\")\n interval = tables.Column(verbose_name=\"Execution Type\")\n start_time = tables.Column(verbose_name=\"Requested\")\n user = tables.Column(verbose_name=\"Requestor\")\n actions = tables.TemplateColumn(SCHEDULED_JOB_APPROVAL_QUEUE_BUTTONS)\n\n class Meta(BaseTable.Meta):\n model = ScheduledJob\n fields = (\"name\", \"job_class\", \"interval\", \"user\", \"start_time\", 
\"actions\")\n\n\nclass ObjectChangeTable(BaseTable):\n time = tables.DateTimeColumn(linkify=True, format=settings.SHORT_DATETIME_FORMAT)\n action = ChoiceFieldColumn()\n changed_object_type = tables.Column(verbose_name=\"Type\")\n object_repr = tables.TemplateColumn(template_code=OBJECTCHANGE_OBJECT, verbose_name=\"Object\")\n request_id = tables.TemplateColumn(template_code=OBJECTCHANGE_REQUEST_ID, verbose_name=\"Request ID\")\n\n class Meta(BaseTable.Meta):\n model = ObjectChange\n fields = (\n \"time\",\n \"user_name\",\n \"action\",\n \"changed_object_type\",\n \"object_repr\",\n \"request_id\",\n )\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipTable(BaseTable):\n pk = ToggleColumn()\n actions = ButtonsColumn(Relationship, buttons=(\"edit\", \"delete\"))\n\n class Meta(BaseTable.Meta):\n model = Relationship\n fields = (\n \"pk\",\n \"name\",\n \"description\",\n \"type\",\n \"source_type\",\n \"destination_type\",\n \"actions\",\n )\n\n\nclass RelationshipAssociationTable(BaseTable):\n pk = ToggleColumn()\n actions = ButtonsColumn(RelationshipAssociation, buttons=(\"delete\",))\n\n source_type = tables.Column()\n source = tables.Column(linkify=True, orderable=False)\n\n destination_type = tables.Column()\n destination = tables.Column(linkify=True, orderable=False)\n\n class Meta(BaseTable.Meta):\n model = RelationshipAssociation\n fields = (\"pk\", \"relationship\", \"source_type\", \"source\", \"destination_type\", \"destination\", \"actions\")\n default_columns = (\"pk\", \"relationship\", \"source\", \"destination\", \"actions\")\n\n\n#\n# Secrets\n#\n\n\nclass SecretTable(BaseTable):\n \"\"\"Table for list view of `Secret` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn()\n tags = TagColumn(url_name=\"extras:secret_list\")\n\n class Meta(BaseTable.Meta):\n model = Secret\n fields = (\n \"pk\",\n \"name\",\n \"provider\",\n \"description\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"provider\",\n \"description\",\n \"tags\",\n )\n\n def render_provider(self, value):\n return registry[\"secrets_providers\"][value].name if value in registry[\"secrets_providers\"] else value\n\n\nclass SecretsGroupTable(BaseTable):\n \"\"\"Table for list view of `SecretsGroup` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn()\n\n class Meta(BaseTable.Meta):\n model = SecretsGroup\n fields = (\n \"pk\",\n \"name\",\n \"description\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"description\",\n )\n\n\n#\n# Custom statuses\n#\n\n\nclass StatusTable(BaseTable):\n \"\"\"Table for list view of `Status` objects.\"\"\"\n\n pk = ToggleColumn()\n name = tables.LinkColumn(viewname=\"extras:status\", args=[Accessor(\"slug\")])\n color = ColorColumn()\n actions = ButtonsColumn(Status, pk_field=\"slug\")\n content_types = ContentTypesColumn(truncate_words=15)\n\n class Meta(BaseTable.Meta):\n model = Status\n fields = [\"pk\", \"name\", \"slug\", \"color\", \"content_types\", \"description\"]\n\n\nclass StatusTableMixin(BaseTable):\n \"\"\"Mixin to add a `status` field to a table.\"\"\"\n\n status = ColoredLabelColumn()\n\n\nclass TagTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn(viewname=\"extras:tag\", args=[Accessor(\"slug\")])\n color = ColorColumn()\n actions = ButtonsColumn(Tag, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = Tag\n fields = (\"pk\", \"name\", \"items\", \"slug\", \"color\", \"description\", \"actions\")\n\n\nclass TaggedItemTable(BaseTable):\n content_object = 
tables.TemplateColumn(template_code=TAGGED_ITEM, orderable=False, verbose_name=\"Object\")\n content_type = tables.Column(verbose_name=\"Type\")\n\n class Meta(BaseTable.Meta):\n model = TaggedItem\n fields = (\"content_object\", \"content_type\")\n\n\nclass WebhookTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n content_types = tables.TemplateColumn(WEBHOOK_CONTENT_TYPES)\n enabled = BooleanColumn()\n type_create = BooleanColumn()\n type_update = BooleanColumn()\n type_delete = BooleanColumn()\n ssl_verification = BooleanColumn()\n\n class Meta(BaseTable.Meta):\n model = Webhook\n fields = (\n \"pk\",\n \"name\",\n \"content_types\",\n \"payload_url\",\n \"http_content_type\",\n \"http_method\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n \"ssl_verification\",\n \"ca_file_path\",\n )\n default_columns = (\n \"pk\",\n \"name\",\n \"content_types\",\n \"payload_url\",\n \"http_content_type\",\n \"enabled\",\n )\n",
"path": "nautobot/extras/tables.py"
}
] | diff --git a/nautobot/extras/tables.py b/nautobot/extras/tables.py
index ad0278e2cca..bfddba8717d 100644
--- a/nautobot/extras/tables.py
+++ b/nautobot/extras/tables.py
@@ -389,7 +389,7 @@ def log_entry_color_css(record):
class JobLogEntryTable(BaseTable):
- created = tables.DateTimeColumn(verbose_name="Time", format=settings.SHORT_DATETIME_FORMAT)
+ created = tables.DateTimeColumn(verbose_name="Time", format="Y-m-d H:i:s.u")
grouping = tables.Column()
log_level = tables.Column(
verbose_name="Level",
|
chainer__chainer-5613 | F.negative_sampling outputs float32 loss for any input dtypes, only in CPU mode
Version: current master b9e9267237d60b76211f42d13f80938d1b926e74
### Code to reproduce
```py
import chainer
import chainer.functions as F
import numpy
import cupy
batch_size = 2
in_size = 5
n_classes = 3
sample_size = 4
def func(xp, in_dtype, reduce):
def sampler(shape):
return xp.ones(shape, xp.int32)
x_arr = xp.ones((batch_size, in_size), in_dtype)
w_arr = xp.ones((n_classes, in_size), in_dtype)
t_arr = xp.ones((batch_size,), numpy.int32)
x = chainer.Variable(x_arr)
w = chainer.Variable(w_arr)
t = chainer.Variable(t_arr)
y = F.negative_sampling(x, t, w, sampler, sample_size, reduce=reduce)
print(in_dtype.__name__, ' -> ', y.dtype)
for reduce in ('sum', 'no'):
print('*** reduce: ', reduce)
for xp in (numpy, cupy):
print('xp: ', xp.__name__)
for in_dtype in (numpy.float16, numpy.float32, numpy.float64):
func(xp, in_dtype, reduce)
print()
```
### Result
```
*** reduce: sum
xp: numpy
float16 -> float32
float32 -> float32
float64 -> float32
xp: cupy
float16 -> float16
float32 -> float32
float64 -> float64
*** reduce: no
xp: numpy
float16 -> float16
float32 -> float32
float64 -> float64
xp: cupy
float16 -> float16
float32 -> float32
float64 -> float64
```
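
The difference comes from the CPU implementation: its `reduce='sum'` branch wraps the reduced loss with a hard-coded `'f'` (float32) dtype instead of reusing the input dtype, which the accompanying patch changes to `x.dtype`. A minimal NumPy-only sketch of the behaviour (the loss values are made up for illustration):

```py
import numpy

# Stand-in for the per-sample losses, whose dtype follows the input x.
loss = numpy.ones(3, numpy.float64)

# Old behaviour: 'f' is NumPy's dtype character for float32, so the summed
# loss is always cast to float32, whatever the input dtype was.
print(numpy.array(loss.sum(), 'f').dtype)         # float32

# Fixed behaviour: reuse the dtype of the input array.
print(numpy.array(loss.sum(), loss.dtype).dtype)  # float64
```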
| [
{
"content": "import numpy\nimport six\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import argument\nfrom chainer.utils import type_check\n\n\ndef _sigmoid_grad(x, y, gy):\n return chainer.functions.activation.sigmoid.SigmoidGrad((x,)).apply(\n (y, gy))[0]\n\n\nclass NegativeSamplingFunction(function_node.FunctionNode):\n\n ignore_label = -1\n samples = None\n\n def __init__(self, sampler, sample_size, reduce='sum'):\n if reduce not in ('sum', 'no'):\n raise ValueError(\n \"only 'sum' and 'no' are valid for 'reduce', but '%s' is \"\n 'given' % reduce)\n\n self.sampler = sampler\n self.sample_size = sample_size\n self.reduce = reduce\n self.wx = None\n\n def _make_samples(self, t):\n size = int(t.shape[0])\n # first one is the positive, and others are sampled negatives\n samples = self.sampler((size, self.sample_size + 1))\n samples[:, 0] = t\n return samples\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x', 't', 'W'))\n x_type, t_type, w_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0],\n w_type.dtype == x_type.dtype,\n w_type.ndim == 2,\n )\n\n def forward_cpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, t, W = inputs\n\n self.ignore_mask = (t != self.ignore_label)\n samples = self._make_samples(t)\n\n w = W[samples]\n wx = numpy.einsum(\n 'ij,ikj->ik', x[self.ignore_mask], w[self.ignore_mask])\n wx[:, 0] *= -1\n\n loss = numpy.zeros(len(x), x.dtype)\n loss[self.ignore_mask] = numpy.sum(numpy.logaddexp(wx, 0), axis=1)\n\n if self.reduce == 'sum':\n loss = numpy.array(loss.sum(), 'f')\n\n self.samples = samples\n return loss,\n\n def forward_gpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, t, W = inputs\n\n self.ignore_mask = (t != self.ignore_label)\n samples = self._make_samples(t)\n\n n_in = x.shape[1]\n self.wx = cuda.elementwise(\n 'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',\n '''\n T f = 0;\n if (mask == 1) {\n for (int j = 0; j < c; ++j) {\n int x_ind[] = {(i / m), j};\n int w_ind[] = {k, j};\n f += x[x_ind] * W[w_ind];\n }\n }\n wx = f;\n ''',\n 'negative_sampling_wx'\n )(W, x, self.ignore_mask[:, None], samples, n_in,\n self.sample_size + 1)\n\n loss = cuda.elementwise(\n 'T wx, int32 c, int32 m, bool mask', 'T y',\n '''\n if (mask) {\n T f = wx;\n if (i % m == 0) {\n f = -f;\n }\n if (f < 0) {\n y = __logf(1 + __expf(f));\n } else {\n y = f + __logf(1 + __expf(-f));\n }\n } else {\n y = 0;\n }\n ''',\n 'negative_sampling_forward'\n )(self.wx, n_in, self.sample_size + 1, self.ignore_mask[:, None])\n\n if self.reduce == 'sum':\n loss = loss.sum()\n else: # 'no':\n loss = loss.sum(axis=1)\n\n self.samples = samples\n return loss,\n\n def backward(self, indexes, grad_outputs):\n x, t, W = self.get_retained_inputs()\n gy, = grad_outputs\n return NegativeSamplingFunctionGrad(\n self.reduce, self.ignore_mask, self.sample_size, self.samples,\n self.wx).apply((x, W, gy))\n\n\nclass NegativeSamplingFunctionGrad(function_node.FunctionNode):\n\n def __init__(self, reduce, ignore_mask, sample_size, samples, wx):\n self.reduce = reduce\n self.ignore_mask = ignore_mask\n self.sample_size = sample_size\n self.samples = samples\n self.wx = wx\n\n def forward_cpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, W, gloss = inputs\n\n gx = numpy.zeros_like(x)\n gW = numpy.zeros_like(W)\n\n for i in 
numpy.arange(len(self.ignore_mask))[self.ignore_mask]:\n ix = x[i]\n\n k = self.samples[i]\n if self.reduce == 'sum':\n igy = gloss\n else:\n igy = gloss[i]\n\n w = W[k]\n f = w.dot(ix)\n\n # g == -y * gloss / (1 + exp(yf))\n f[0] *= -1\n g = igy / (1 + numpy.exp(-f))\n g[0] *= -1\n\n gx[i] = g.dot(w)\n for ik, ig in six.moves.zip(k, g):\n gW[ik] += ig * ix\n return gx, None, gW\n\n def forward_gpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, W, gy = inputs\n\n if self.reduce == 'no':\n gy = gy[:, None]\n\n wx = self.wx.astype(x.dtype, copy=False)\n g = cuda.elementwise(\n 'T wx, T gy, int32 m', 'T g',\n '''\n T y;\n if (i % m == 0) {\n y = 1;\n } else {\n y = -1;\n }\n\n g = -y * gy / (1.0f + __expf(wx * y));\n ''',\n 'negative_sampling_calculate_g'\n )(wx, gy, self.sample_size + 1)\n\n cupy = cuda.cupy\n gx = cupy.zeros_like(x)\n n_in = x.shape[1]\n cuda.elementwise(\n 'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',\n '''\n int d = i / c;\n T w = 0;\n if (mask == 1){\n for (int j = 0; j < m; ++j) {\n w += g[d * m + j] * W[k[d * m + j] * c + i % c];\n }\n }\n gx = w;\n ''',\n 'negative_sampling_calculate_gx'\n )(g, W, self.ignore_mask[:, None], self.samples, n_in,\n self.sample_size + 1, gx)\n\n gW = cupy.zeros_like(W)\n cuda.elementwise(\n 'T g, raw T x, S k, bool mask, int32 c, int32 m',\n 'raw T gW',\n '''\n T gi = g;\n if (mask == 1) {\n for (int j = 0; j < c; ++j) {\n atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);\n }\n }\n ''',\n 'negative_sampling_calculate_gw'\n )(g, x, self.samples, self.ignore_mask[:, None], n_in,\n self.sample_size + 1, gW)\n return gx, None, gW\n\n def backward(self, indexes, grad_outputs):\n x, W, gy = self.get_retained_inputs()\n\n xp = backend.get_array_module(x.data)\n\n if 0 in indexes:\n gx = chainer.Variable(xp.zeros_like(x.data))\n if 1 in indexes:\n gW = chainer.Variable(xp.zeros_like(W.data))\n if 2 in indexes:\n ggy = chainer.Variable(xp.zeros_like(gy.data))\n\n ggx, _, ggW = grad_outputs\n\n pos_neg_mask = xp.ones(self.sample_size + 1)\n pos_neg_mask[0] *= -1\n\n for i in xp.arange(len(self.ignore_mask))[self.ignore_mask]:\n # Partial forward pass to obtain intermediate `Variable`s\n ix = x[i]\n k = self.samples[i]\n\n if self.reduce == 'sum':\n igy = gy\n else:\n igy = gy[i]\n\n w = W[k]\n f = chainer.functions.flatten(\n chainer.functions.matmul(w, ix[:, None])) * pos_neg_mask\n sigf = chainer.functions.sigmoid(f)\n g = chainer.functions.broadcast_to(igy, f.shape) * sigf \\\n * pos_neg_mask\n\n dgW_dg = chainer.functions.flatten(\n chainer.functions.matmul(ggW[k], ix[:, None])) * pos_neg_mask\n dgW_df = chainer.functions.broadcast_to(igy, f.shape) \\\n * _sigmoid_grad(f, sigf, dgW_dg) * pos_neg_mask\n dgx_dg = chainer.functions.flatten(\n chainer.functions.matmul(ggx[i][None, :], w, transb=True))\n dgx_df = chainer.functions.broadcast_to(igy, f.shape) \\\n * _sigmoid_grad(f, sigf, dgx_dg)\n\n if 0 in indexes:\n # deriative of gx\n dgx = chainer.functions.matmul(w, dgx_df[:, None], transa=True)\n\n # derivative of gW\n dgx += chainer.functions.matmul(g[None, :], ggW[k]).T\n dgx += chainer.functions.matmul(\n w, dgW_df[:, None], transa=True)\n\n gx = chainer.functions.scatter_add(\n gx, i, chainer.functions.flatten(dgx))\n\n if 1 in indexes:\n # deriative of gx\n shape = ggx[i].shape\n for ik, ig, idgx_df in six.moves.zip(k, g, dgx_df):\n ig = chainer.functions.broadcast_to(ig, shape)\n idgx_df = chainer.functions.broadcast_to(idgx_df, shape)\n gW = chainer.functions.scatter_add(\n gW, ik, ig * ggx[i] + idgx_df * 
ix)\n\n # derivative of gW\n gW = chainer.functions.scatter_add(\n gW, k,\n chainer.functions.matmul(dgW_df[:, None], ix[None, :]))\n\n if 2 in indexes:\n dgx_dg *= pos_neg_mask\n dggy = chainer.functions.sum((dgx_dg + dgW_dg) * sigf)\n if self.reduce == 'sum':\n ggy += dggy\n else:\n ggy = chainer.functions.scatter_add(ggy, i, dggy)\n\n ret = []\n if 0 in indexes:\n ret.append(gx)\n if 1 in indexes:\n ret.append(gW)\n if 2 in indexes:\n ret.append(ggy)\n return ret\n\n\ndef negative_sampling(x, t, W, sampler, sample_size, reduce='sum', **kwargs):\n \"\"\"negative_sampling(x, t, W, sampler, sample_size, reduce='sum', *, return_samples=False)\n\n Negative sampling loss function.\n\n In natural language processing, especially language modeling, the number of\n words in a vocabulary can be very large.\n Therefore, you need to spend a lot of time calculating the gradient of the\n embedding matrix.\n\n By using the negative sampling trick you only need to calculate the\n gradient for a few sampled negative examples.\n\n The loss is defined as follows.\n\n .. math::\n\n f(x, p) = - \\\\log \\\\sigma(x^\\\\top w_p) - \\\\\n k E_{i \\\\sim P(i)}[\\\\log \\\\sigma(- x^\\\\top w_i)]\n\n where :math:`\\\\sigma(\\\\cdot)` is a sigmoid function, :math:`w_i` is the\n weight vector for the word :math:`i`, and :math:`p` is a positive example.\n It is approximated with :math:`k` examples :math:`N` sampled from\n probability :math:`P(i)`.\n\n .. math::\n\n f(x, p) \\\\approx - \\\\log \\\\sigma(x^\\\\top w_p) - \\\\\n \\\\sum_{n \\\\in N} \\\\log \\\\sigma(-x^\\\\top w_n)\n\n Each sample of :math:`N` is drawn from the word distribution\n :math:`P(w) = \\\\frac{1}{Z} c(w)^\\\\alpha`, where :math:`c(w)` is the\n unigram count of the word :math:`w`, :math:`\\\\alpha` is a hyper-parameter,\n and :math:`Z` is the normalization constant.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n t (~chainer.Variable): Vector of ground truth labels.\n W (~chainer.Variable): Weight matrix.\n sampler (~types.FunctionType): Sampling function. It takes a shape and\n returns an integer array of the shape. Each element of this array\n is a sample from the word distribution.\n A :class:`~chainer.utils.WalkerAlias` object built with the power\n distribution of word frequency is recommended.\n sample_size (int): Number of samples.\n reduce (str): Reduction option. Its value must be either\n ``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.\n return_samples (bool):\n If ``True``, the sample array is also returned.\n The sample array is a\n :math:`(\\\\text{batch_size}, \\\\text{sample_size} + 1)`-array of\n integers whose first column is fixed to the ground truth labels\n and the other columns are drawn from the ``sampler``.\n\n Returns:\n ~chainer.Variable or tuple:\n If ``return_samples`` is ``False`` (default), the output\n variable holding the loss value(s) calculated by the\n above equation is returned. Otherwise, a tuple of the output\n variable and the sample array is returned.\n\n If ``reduce`` is ``'no'``, the output variable holds array\n whose shape is same as one of (hence both of) input variables.\n If it is ``'sum'``, the output variable holds a scalar value.\n\n See: `Distributed Representations of Words and Phrases and their\\\n Compositionality <https://arxiv.org/abs/1310.4546>`_\n\n .. 
seealso:: :class:`~chainer.links.NegativeSampling`.\n\n \"\"\" # NOQA\n return_samples = False\n if kwargs:\n return_samples, = argument.parse_kwargs(\n kwargs, ('return_samples', return_samples))\n\n func = NegativeSamplingFunction(sampler, sample_size, reduce)\n out = func.apply((x, t, W))[0]\n\n if return_samples:\n return out, func.samples\n return out\n",
"path": "chainer/functions/loss/negative_sampling.py"
}
] | [
{
"content": "import numpy\nimport six\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import argument\nfrom chainer.utils import type_check\n\n\ndef _sigmoid_grad(x, y, gy):\n return chainer.functions.activation.sigmoid.SigmoidGrad((x,)).apply(\n (y, gy))[0]\n\n\nclass NegativeSamplingFunction(function_node.FunctionNode):\n\n ignore_label = -1\n samples = None\n\n def __init__(self, sampler, sample_size, reduce='sum'):\n if reduce not in ('sum', 'no'):\n raise ValueError(\n \"only 'sum' and 'no' are valid for 'reduce', but '%s' is \"\n 'given' % reduce)\n\n self.sampler = sampler\n self.sample_size = sample_size\n self.reduce = reduce\n self.wx = None\n\n def _make_samples(self, t):\n size = int(t.shape[0])\n # first one is the positive, and others are sampled negatives\n samples = self.sampler((size, self.sample_size + 1))\n samples[:, 0] = t\n return samples\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x', 't', 'W'))\n x_type, t_type, w_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0],\n w_type.dtype == x_type.dtype,\n w_type.ndim == 2,\n )\n\n def forward_cpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, t, W = inputs\n\n self.ignore_mask = (t != self.ignore_label)\n samples = self._make_samples(t)\n\n w = W[samples]\n wx = numpy.einsum(\n 'ij,ikj->ik', x[self.ignore_mask], w[self.ignore_mask])\n wx[:, 0] *= -1\n\n loss = numpy.zeros(len(x), x.dtype)\n loss[self.ignore_mask] = numpy.sum(numpy.logaddexp(wx, 0), axis=1)\n\n if self.reduce == 'sum':\n loss = numpy.array(loss.sum(), x.dtype)\n\n self.samples = samples\n return loss,\n\n def forward_gpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, t, W = inputs\n\n self.ignore_mask = (t != self.ignore_label)\n samples = self._make_samples(t)\n\n n_in = x.shape[1]\n self.wx = cuda.elementwise(\n 'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',\n '''\n T f = 0;\n if (mask == 1) {\n for (int j = 0; j < c; ++j) {\n int x_ind[] = {(i / m), j};\n int w_ind[] = {k, j};\n f += x[x_ind] * W[w_ind];\n }\n }\n wx = f;\n ''',\n 'negative_sampling_wx'\n )(W, x, self.ignore_mask[:, None], samples, n_in,\n self.sample_size + 1)\n\n loss = cuda.elementwise(\n 'T wx, int32 c, int32 m, bool mask', 'T y',\n '''\n if (mask) {\n T f = wx;\n if (i % m == 0) {\n f = -f;\n }\n if (f < 0) {\n y = __logf(1 + __expf(f));\n } else {\n y = f + __logf(1 + __expf(-f));\n }\n } else {\n y = 0;\n }\n ''',\n 'negative_sampling_forward'\n )(self.wx, n_in, self.sample_size + 1, self.ignore_mask[:, None])\n\n if self.reduce == 'sum':\n loss = loss.sum()\n else: # 'no':\n loss = loss.sum(axis=1)\n\n self.samples = samples\n return loss,\n\n def backward(self, indexes, grad_outputs):\n x, t, W = self.get_retained_inputs()\n gy, = grad_outputs\n return NegativeSamplingFunctionGrad(\n self.reduce, self.ignore_mask, self.sample_size, self.samples,\n self.wx).apply((x, W, gy))\n\n\nclass NegativeSamplingFunctionGrad(function_node.FunctionNode):\n\n def __init__(self, reduce, ignore_mask, sample_size, samples, wx):\n self.reduce = reduce\n self.ignore_mask = ignore_mask\n self.sample_size = sample_size\n self.samples = samples\n self.wx = wx\n\n def forward_cpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, W, gloss = inputs\n\n gx = numpy.zeros_like(x)\n gW = numpy.zeros_like(W)\n\n for i in 
numpy.arange(len(self.ignore_mask))[self.ignore_mask]:\n ix = x[i]\n\n k = self.samples[i]\n if self.reduce == 'sum':\n igy = gloss\n else:\n igy = gloss[i]\n\n w = W[k]\n f = w.dot(ix)\n\n # g == -y * gloss / (1 + exp(yf))\n f[0] *= -1\n g = igy / (1 + numpy.exp(-f))\n g[0] *= -1\n\n gx[i] = g.dot(w)\n for ik, ig in six.moves.zip(k, g):\n gW[ik] += ig * ix\n return gx, None, gW\n\n def forward_gpu(self, inputs):\n self.retain_inputs((0, 1, 2))\n x, W, gy = inputs\n\n if self.reduce == 'no':\n gy = gy[:, None]\n\n wx = self.wx.astype(x.dtype, copy=False)\n g = cuda.elementwise(\n 'T wx, T gy, int32 m', 'T g',\n '''\n T y;\n if (i % m == 0) {\n y = 1;\n } else {\n y = -1;\n }\n\n g = -y * gy / (1.0f + __expf(wx * y));\n ''',\n 'negative_sampling_calculate_g'\n )(wx, gy, self.sample_size + 1)\n\n cupy = cuda.cupy\n gx = cupy.zeros_like(x)\n n_in = x.shape[1]\n cuda.elementwise(\n 'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',\n '''\n int d = i / c;\n T w = 0;\n if (mask == 1){\n for (int j = 0; j < m; ++j) {\n w += g[d * m + j] * W[k[d * m + j] * c + i % c];\n }\n }\n gx = w;\n ''',\n 'negative_sampling_calculate_gx'\n )(g, W, self.ignore_mask[:, None], self.samples, n_in,\n self.sample_size + 1, gx)\n\n gW = cupy.zeros_like(W)\n cuda.elementwise(\n 'T g, raw T x, S k, bool mask, int32 c, int32 m',\n 'raw T gW',\n '''\n T gi = g;\n if (mask == 1) {\n for (int j = 0; j < c; ++j) {\n atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);\n }\n }\n ''',\n 'negative_sampling_calculate_gw'\n )(g, x, self.samples, self.ignore_mask[:, None], n_in,\n self.sample_size + 1, gW)\n return gx, None, gW\n\n def backward(self, indexes, grad_outputs):\n x, W, gy = self.get_retained_inputs()\n\n xp = backend.get_array_module(x.data)\n\n if 0 in indexes:\n gx = chainer.Variable(xp.zeros_like(x.data))\n if 1 in indexes:\n gW = chainer.Variable(xp.zeros_like(W.data))\n if 2 in indexes:\n ggy = chainer.Variable(xp.zeros_like(gy.data))\n\n ggx, _, ggW = grad_outputs\n\n pos_neg_mask = xp.ones(self.sample_size + 1)\n pos_neg_mask[0] *= -1\n\n for i in xp.arange(len(self.ignore_mask))[self.ignore_mask]:\n # Partial forward pass to obtain intermediate `Variable`s\n ix = x[i]\n k = self.samples[i]\n\n if self.reduce == 'sum':\n igy = gy\n else:\n igy = gy[i]\n\n w = W[k]\n f = chainer.functions.flatten(\n chainer.functions.matmul(w, ix[:, None])) * pos_neg_mask\n sigf = chainer.functions.sigmoid(f)\n g = chainer.functions.broadcast_to(igy, f.shape) * sigf \\\n * pos_neg_mask\n\n dgW_dg = chainer.functions.flatten(\n chainer.functions.matmul(ggW[k], ix[:, None])) * pos_neg_mask\n dgW_df = chainer.functions.broadcast_to(igy, f.shape) \\\n * _sigmoid_grad(f, sigf, dgW_dg) * pos_neg_mask\n dgx_dg = chainer.functions.flatten(\n chainer.functions.matmul(ggx[i][None, :], w, transb=True))\n dgx_df = chainer.functions.broadcast_to(igy, f.shape) \\\n * _sigmoid_grad(f, sigf, dgx_dg)\n\n if 0 in indexes:\n # deriative of gx\n dgx = chainer.functions.matmul(w, dgx_df[:, None], transa=True)\n\n # derivative of gW\n dgx += chainer.functions.matmul(g[None, :], ggW[k]).T\n dgx += chainer.functions.matmul(\n w, dgW_df[:, None], transa=True)\n\n gx = chainer.functions.scatter_add(\n gx, i, chainer.functions.flatten(dgx))\n\n if 1 in indexes:\n # deriative of gx\n shape = ggx[i].shape\n for ik, ig, idgx_df in six.moves.zip(k, g, dgx_df):\n ig = chainer.functions.broadcast_to(ig, shape)\n idgx_df = chainer.functions.broadcast_to(idgx_df, shape)\n gW = chainer.functions.scatter_add(\n gW, ik, ig * ggx[i] + idgx_df * 
ix)\n\n # derivative of gW\n gW = chainer.functions.scatter_add(\n gW, k,\n chainer.functions.matmul(dgW_df[:, None], ix[None, :]))\n\n if 2 in indexes:\n dgx_dg *= pos_neg_mask\n dggy = chainer.functions.sum((dgx_dg + dgW_dg) * sigf)\n if self.reduce == 'sum':\n ggy += dggy\n else:\n ggy = chainer.functions.scatter_add(ggy, i, dggy)\n\n ret = []\n if 0 in indexes:\n ret.append(gx)\n if 1 in indexes:\n ret.append(gW)\n if 2 in indexes:\n ret.append(ggy)\n return ret\n\n\ndef negative_sampling(x, t, W, sampler, sample_size, reduce='sum', **kwargs):\n \"\"\"negative_sampling(x, t, W, sampler, sample_size, reduce='sum', *, return_samples=False)\n\n Negative sampling loss function.\n\n In natural language processing, especially language modeling, the number of\n words in a vocabulary can be very large.\n Therefore, you need to spend a lot of time calculating the gradient of the\n embedding matrix.\n\n By using the negative sampling trick you only need to calculate the\n gradient for a few sampled negative examples.\n\n The loss is defined as follows.\n\n .. math::\n\n f(x, p) = - \\\\log \\\\sigma(x^\\\\top w_p) - \\\\\n k E_{i \\\\sim P(i)}[\\\\log \\\\sigma(- x^\\\\top w_i)]\n\n where :math:`\\\\sigma(\\\\cdot)` is a sigmoid function, :math:`w_i` is the\n weight vector for the word :math:`i`, and :math:`p` is a positive example.\n It is approximated with :math:`k` examples :math:`N` sampled from\n probability :math:`P(i)`.\n\n .. math::\n\n f(x, p) \\\\approx - \\\\log \\\\sigma(x^\\\\top w_p) - \\\\\n \\\\sum_{n \\\\in N} \\\\log \\\\sigma(-x^\\\\top w_n)\n\n Each sample of :math:`N` is drawn from the word distribution\n :math:`P(w) = \\\\frac{1}{Z} c(w)^\\\\alpha`, where :math:`c(w)` is the\n unigram count of the word :math:`w`, :math:`\\\\alpha` is a hyper-parameter,\n and :math:`Z` is the normalization constant.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n t (~chainer.Variable): Vector of ground truth labels.\n W (~chainer.Variable): Weight matrix.\n sampler (~types.FunctionType): Sampling function. It takes a shape and\n returns an integer array of the shape. Each element of this array\n is a sample from the word distribution.\n A :class:`~chainer.utils.WalkerAlias` object built with the power\n distribution of word frequency is recommended.\n sample_size (int): Number of samples.\n reduce (str): Reduction option. Its value must be either\n ``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.\n return_samples (bool):\n If ``True``, the sample array is also returned.\n The sample array is a\n :math:`(\\\\text{batch_size}, \\\\text{sample_size} + 1)`-array of\n integers whose first column is fixed to the ground truth labels\n and the other columns are drawn from the ``sampler``.\n\n Returns:\n ~chainer.Variable or tuple:\n If ``return_samples`` is ``False`` (default), the output\n variable holding the loss value(s) calculated by the\n above equation is returned. Otherwise, a tuple of the output\n variable and the sample array is returned.\n\n If ``reduce`` is ``'no'``, the output variable holds array\n whose shape is same as one of (hence both of) input variables.\n If it is ``'sum'``, the output variable holds a scalar value.\n\n See: `Distributed Representations of Words and Phrases and their\\\n Compositionality <https://arxiv.org/abs/1310.4546>`_\n\n .. 
seealso:: :class:`~chainer.links.NegativeSampling`.\n\n \"\"\" # NOQA\n return_samples = False\n if kwargs:\n return_samples, = argument.parse_kwargs(\n kwargs, ('return_samples', return_samples))\n\n func = NegativeSamplingFunction(sampler, sample_size, reduce)\n out = func.apply((x, t, W))[0]\n\n if return_samples:\n return out, func.samples\n return out\n",
"path": "chainer/functions/loss/negative_sampling.py"
}
] | diff --git a/chainer/functions/loss/negative_sampling.py b/chainer/functions/loss/negative_sampling.py
index f6d0c2bd9b90..9ec9a935127d 100644
--- a/chainer/functions/loss/negative_sampling.py
+++ b/chainer/functions/loss/negative_sampling.py
@@ -67,7 +67,7 @@ def forward_cpu(self, inputs):
loss[self.ignore_mask] = numpy.sum(numpy.logaddexp(wx, 0), axis=1)
if self.reduce == 'sum':
- loss = numpy.array(loss.sum(), 'f')
+ loss = numpy.array(loss.sum(), x.dtype)
self.samples = samples
return loss,
diff --git a/tests/chainer_tests/functions_tests/loss_tests/test_negative_sampling.py b/tests/chainer_tests/functions_tests/loss_tests/test_negative_sampling.py
index 13851e7efc57..12b838652bed 100644
--- a/tests/chainer_tests/functions_tests/loss_tests/test_negative_sampling.py
+++ b/tests/chainer_tests/functions_tests/loss_tests/test_negative_sampling.py
@@ -1,6 +1,7 @@
import unittest
import numpy
+import pytest
import six
import chainer
@@ -60,6 +61,7 @@ def setUp(self):
self.check_double_backward_options['dtype'] = numpy.float64
def check_forward(self, x_data, t_data, w_data, sampler):
+ batch_size = len(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
w = chainer.Variable(w_data)
@@ -67,17 +69,24 @@ def check_forward(self, x_data, t_data, w_data, sampler):
# return_samples=False
y = functions.negative_sampling(
x, t, w, sampler, self.sample_size, reduce=self.reduce)
+ assert y.dtype == self.dtype
# return_samples=True
y_, samples = functions.negative_sampling(
x, t, w, sampler, self.sample_size, reduce=self.reduce,
return_samples=True)
+ xp = chainer.backend.get_array_module(x)
+ assert isinstance(samples, xp.ndarray)
+ assert samples.dtype == numpy.int32
+ assert samples.shape == (batch_size, self.sample_size + 1)
+
# Sampler is deterministic, so y and y_ should equal.
+ assert y.dtype == y_.dtype
numpy.testing.assert_array_equal(
cuda.to_cpu(y.array), cuda.to_cpu(y_.array))
- self.assertEqual(y.shape, self.gy.shape)
+ assert y.shape == self.gy.shape
samples = cuda.to_cpu(samples)
@@ -98,6 +107,7 @@ def check_forward(self, x_data, t_data, w_data, sampler):
if self.reduce == 'sum':
loss = loss.sum()
+ assert y.dtype == loss.dtype
testing.assert_allclose(y.data, loss, **self.check_forward_options)
def test_forward_cpu(self):
@@ -167,7 +177,7 @@ def check_invalid_option(self, xp):
t = xp.asarray(self.t)
w = xp.asarray(self.w)
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
negative_sampling.negative_sampling(
x, t, w, make_sampler(xp, 5), 2, reduce='invalid_option')
|
arviz-devs__arviz-1334 | Fix negative values in std
Edit: there is an error in `numeric_utils`. The order of operations is wrong:
`std_devs = np.diag(cov ** 0.5)`
The correct order is:
`std_devs = np.diag(cov) ** 0.5`
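A minimal sketch of what the two orderings do, with illustrative numbers rather than values from the arviz code: the element-wise version raises every entry of the covariance matrix to the 0.5 power, so any negative off-diagonal covariance produces NaNs and a RuntimeWarning, while taking the diagonal first only ever square-roots the non-negative variances.
```python
import numpy as np

cov = np.array([[4.0, -1.5],
                [-1.5, 9.0]])  # negative covariance between the two dimensions

# Element-wise power over the whole matrix: the negative off-diagonal
# entries become NaN and NumPy warns "invalid value encountered in power".
elementwise = cov ** 0.5
print(elementwise)
# [[ 2. nan]
#  [nan  3.]]

# Diagonal first: only the variances (always >= 0) are square-rooted.
std_devs = np.diag(cov) ** 0.5
print(std_devs)  # [2. 3.]
```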
| [
{
"content": "\"\"\"Numerical utility functions for ArviZ.\"\"\"\nimport warnings\nimport numpy as np\nfrom scipy.signal import convolve, convolve2d\nfrom scipy.signal.windows import gaussian\nfrom scipy.sparse import coo_matrix\n\nfrom .stats.stats_utils import histogram\nfrom .utils import _stack, _dot, _cov\n\n\ndef _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None):\n \"\"\"Fast Fourier transform-based Gaussian kernel density estimate (KDE).\n\n The code was adapted from https://github.com/mfouesneau/faststats\n\n Parameters\n ----------\n x : Numpy array or list\n cumulative : bool\n If true, estimate the cdf instead of the pdf\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy).\n xmin : float\n Manually set lower limit.\n xmax : float\n Manually set upper limit.\n\n Returns\n -------\n density: A gridded 1D KDE of the input points (x)\n xmin: minimum value of x\n xmax: maximum value of x\n \"\"\"\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n if x.size == 0:\n warnings.warn(\"kde plot failed, you may want to check your data\")\n return np.array([np.nan]), np.nan, np.nan\n\n len_x = len(x)\n n_points = 200 if (xmin or xmax) is None else 500\n\n if xmin is None:\n xmin = np.min(x)\n if xmax is None:\n xmax = np.max(x)\n\n assert np.min(x) >= xmin\n assert np.max(x) <= xmax\n\n log_len_x = np.log(len_x) * bw\n\n n_bins = min(int(len_x ** (1 / 3) * log_len_x * 2), n_points)\n if n_bins < 2:\n warnings.warn(\"kde plot failed, you may want to check your data\")\n return np.array([np.nan]), np.nan, np.nan\n\n # hist, bin_edges = np.histogram(x, bins=n_bins, range=(xmin, xmax))\n # grid = hist / (hist.sum() * np.diff(bin_edges))\n\n _, grid, _ = histogram(x, n_bins, range_hist=(xmin, xmax))\n\n scotts_factor = len_x ** (-0.2)\n kern_nx = int(scotts_factor * 2 * np.pi * log_len_x)\n kernel = gaussian(kern_nx, scotts_factor * log_len_x)\n\n npad = min(n_bins, 2 * kern_nx)\n grid = np.concatenate([grid[npad:0:-1], grid, grid[n_bins : n_bins - npad : -1]])\n density = convolve(grid, kernel, mode=\"same\", method=\"direct\")[npad : npad + n_bins]\n norm_factor = (2 * np.pi * log_len_x ** 2 * scotts_factor ** 2) ** 0.5\n\n density /= norm_factor\n\n if cumulative:\n density = density.cumsum() / density.sum()\n\n return density, xmin, xmax\n\n\ndef _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):\n \"\"\"\n 2D fft-based Gaussian kernel density estimate (KDE).\n\n The code was adapted from https://github.com/mfouesneau/faststats\n\n Parameters\n ----------\n x : Numpy array or list\n y : Numpy array or list\n gridsize : tuple\n Number of points used to discretize data. Use powers of 2 for fft optimization\n circular: bool\n If True, use circular boundaries. 
Defaults to False\n Returns\n -------\n grid: A gridded 2D KDE of the input points (x, y)\n xmin: minimum value of x\n xmax: maximum value of x\n ymin: minimum value of y\n ymax: maximum value of y\n \"\"\"\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n y = np.asarray(y, dtype=float)\n y = y[np.isfinite(y)]\n\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n len_x = len(x)\n weights = np.ones(len_x)\n n_x, n_y = gridsize\n\n d_x = (xmax - xmin) / (n_x - 1)\n d_y = (ymax - ymin) / (n_y - 1)\n\n xyi = _stack(x, y).T\n xyi -= [xmin, ymin]\n xyi /= [d_x, d_y]\n xyi = np.floor(xyi, xyi).T\n\n scotts_factor = len_x ** (-1 / 6)\n cov = _cov(xyi)\n std_devs = np.diag(cov ** 0.5)\n kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)\n\n inv_cov = np.linalg.inv(cov * scotts_factor ** 2)\n\n x_x = np.arange(kern_nx) - kern_nx / 2\n y_y = np.arange(kern_ny) - kern_ny / 2\n x_x, y_y = np.meshgrid(x_x, y_y)\n\n kernel = _stack(x_x.flatten(), y_y.flatten())\n kernel = _dot(inv_cov, kernel) * kernel\n kernel = np.exp(-kernel.sum(axis=0) / 2)\n kernel = kernel.reshape((int(kern_ny), int(kern_nx)))\n\n boundary = \"wrap\" if circular else \"symm\"\n\n grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()\n grid = convolve2d(grid, kernel, mode=\"same\", boundary=boundary)\n\n norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)\n norm_factor = len_x * d_x * d_y * norm_factor ** 0.5\n\n grid /= norm_factor\n\n return grid, xmin, xmax, ymin, ymax\n\n\ndef get_bins(values):\n \"\"\"\n Automatically compute the number of bins for discrete variables.\n\n Parameters\n ----------\n values = numpy array\n values\n\n Returns\n -------\n array with the bins\n\n Notes\n -----\n Computes the width of the bins by taking the maximun of the Sturges and the Freedman-Diaconis\n estimators. Acording to numpy `np.histogram` this provides good all around performance.\n\n The Sturges is a very simplistic estimator based on the assumption of normality of the data.\n This estimator has poor performance for non-normal data, which becomes especially obvious for\n large data sets. The estimate depends only on size of the data.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.\n It is considered a robusts version of the Scott rule as the IQR is less affected by outliers\n than the standard deviation. However, the IQR depends on fewer points than the standard\n deviation, so it is less accurate, especially for long tailed distributions.\n \"\"\"\n x_min = values.min().astype(int)\n x_max = values.max().astype(int)\n\n # Sturges histogram bin estimator\n bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)\n\n # The Freedman-Diaconis histogram bin estimator.\n iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return\n bins_fd = 2 * iqr * values.size ** (-1 / 3)\n\n width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)\n\n return np.arange(x_min, x_max + width + 1, width)\n\n\ndef _sturges_formula(dataset, mult=1):\n \"\"\"Use Sturges' formula to determine number of bins.\n\n See https://en.wikipedia.org/wiki/Histogram#Sturges'_formula\n or https://doi.org/10.1080%2F01621459.1926.10502161\n\n Parameters\n ----------\n dataset: xarray.DataSet\n Must have the `draw` dimension\n\n mult: float\n Used to scale the number of bins up or down. 
Default is 1 for Sturges' formula.\n\n Returns\n -------\n int\n Number of bins to use\n \"\"\"\n return int(np.ceil(mult * np.log2(dataset.draw.size)) + 1)\n",
"path": "arviz/numeric_utils.py"
}
] | [
{
"content": "\"\"\"Numerical utility functions for ArviZ.\"\"\"\nimport warnings\nimport numpy as np\nfrom scipy.signal import convolve, convolve2d\nfrom scipy.signal.windows import gaussian\nfrom scipy.sparse import coo_matrix\n\nfrom .stats.stats_utils import histogram\nfrom .utils import _stack, _dot, _cov\n\n\ndef _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None):\n \"\"\"Fast Fourier transform-based Gaussian kernel density estimate (KDE).\n\n The code was adapted from https://github.com/mfouesneau/faststats\n\n Parameters\n ----------\n x : Numpy array or list\n cumulative : bool\n If true, estimate the cdf instead of the pdf\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy).\n xmin : float\n Manually set lower limit.\n xmax : float\n Manually set upper limit.\n\n Returns\n -------\n density: A gridded 1D KDE of the input points (x)\n xmin: minimum value of x\n xmax: maximum value of x\n \"\"\"\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n if x.size == 0:\n warnings.warn(\"kde plot failed, you may want to check your data\")\n return np.array([np.nan]), np.nan, np.nan\n\n len_x = len(x)\n n_points = 200 if (xmin or xmax) is None else 500\n\n if xmin is None:\n xmin = np.min(x)\n if xmax is None:\n xmax = np.max(x)\n\n assert np.min(x) >= xmin\n assert np.max(x) <= xmax\n\n log_len_x = np.log(len_x) * bw\n\n n_bins = min(int(len_x ** (1 / 3) * log_len_x * 2), n_points)\n if n_bins < 2:\n warnings.warn(\"kde plot failed, you may want to check your data\")\n return np.array([np.nan]), np.nan, np.nan\n\n # hist, bin_edges = np.histogram(x, bins=n_bins, range=(xmin, xmax))\n # grid = hist / (hist.sum() * np.diff(bin_edges))\n\n _, grid, _ = histogram(x, n_bins, range_hist=(xmin, xmax))\n\n scotts_factor = len_x ** (-0.2)\n kern_nx = int(scotts_factor * 2 * np.pi * log_len_x)\n kernel = gaussian(kern_nx, scotts_factor * log_len_x)\n\n npad = min(n_bins, 2 * kern_nx)\n grid = np.concatenate([grid[npad:0:-1], grid, grid[n_bins : n_bins - npad : -1]])\n density = convolve(grid, kernel, mode=\"same\", method=\"direct\")[npad : npad + n_bins]\n norm_factor = (2 * np.pi * log_len_x ** 2 * scotts_factor ** 2) ** 0.5\n\n density /= norm_factor\n\n if cumulative:\n density = density.cumsum() / density.sum()\n\n return density, xmin, xmax\n\n\ndef _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):\n \"\"\"\n 2D fft-based Gaussian kernel density estimate (KDE).\n\n The code was adapted from https://github.com/mfouesneau/faststats\n\n Parameters\n ----------\n x : Numpy array or list\n y : Numpy array or list\n gridsize : tuple\n Number of points used to discretize data. Use powers of 2 for fft optimization\n circular: bool\n If True, use circular boundaries. 
Defaults to False\n Returns\n -------\n grid: A gridded 2D KDE of the input points (x, y)\n xmin: minimum value of x\n xmax: maximum value of x\n ymin: minimum value of y\n ymax: maximum value of y\n \"\"\"\n x = np.asarray(x, dtype=float)\n x = x[np.isfinite(x)]\n y = np.asarray(y, dtype=float)\n y = y[np.isfinite(y)]\n\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n len_x = len(x)\n weights = np.ones(len_x)\n n_x, n_y = gridsize\n\n d_x = (xmax - xmin) / (n_x - 1)\n d_y = (ymax - ymin) / (n_y - 1)\n\n xyi = _stack(x, y).T\n xyi -= [xmin, ymin]\n xyi /= [d_x, d_y]\n xyi = np.floor(xyi, xyi).T\n\n scotts_factor = len_x ** (-1 / 6)\n cov = _cov(xyi)\n std_devs = np.diag(cov) ** 0.5\n kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)\n\n inv_cov = np.linalg.inv(cov * scotts_factor ** 2)\n\n x_x = np.arange(kern_nx) - kern_nx / 2\n y_y = np.arange(kern_ny) - kern_ny / 2\n x_x, y_y = np.meshgrid(x_x, y_y)\n\n kernel = _stack(x_x.flatten(), y_y.flatten())\n kernel = _dot(inv_cov, kernel) * kernel\n kernel = np.exp(-kernel.sum(axis=0) / 2)\n kernel = kernel.reshape((int(kern_ny), int(kern_nx)))\n\n boundary = \"wrap\" if circular else \"symm\"\n\n grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()\n grid = convolve2d(grid, kernel, mode=\"same\", boundary=boundary)\n\n norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)\n norm_factor = len_x * d_x * d_y * norm_factor ** 0.5\n\n grid /= norm_factor\n\n return grid, xmin, xmax, ymin, ymax\n\n\ndef get_bins(values):\n \"\"\"\n Automatically compute the number of bins for discrete variables.\n\n Parameters\n ----------\n values = numpy array\n values\n\n Returns\n -------\n array with the bins\n\n Notes\n -----\n Computes the width of the bins by taking the maximun of the Sturges and the Freedman-Diaconis\n estimators. Acording to numpy `np.histogram` this provides good all around performance.\n\n The Sturges is a very simplistic estimator based on the assumption of normality of the data.\n This estimator has poor performance for non-normal data, which becomes especially obvious for\n large data sets. The estimate depends only on size of the data.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.\n It is considered a robusts version of the Scott rule as the IQR is less affected by outliers\n than the standard deviation. However, the IQR depends on fewer points than the standard\n deviation, so it is less accurate, especially for long tailed distributions.\n \"\"\"\n x_min = values.min().astype(int)\n x_max = values.max().astype(int)\n\n # Sturges histogram bin estimator\n bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)\n\n # The Freedman-Diaconis histogram bin estimator.\n iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return\n bins_fd = 2 * iqr * values.size ** (-1 / 3)\n\n width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)\n\n return np.arange(x_min, x_max + width + 1, width)\n\n\ndef _sturges_formula(dataset, mult=1):\n \"\"\"Use Sturges' formula to determine number of bins.\n\n See https://en.wikipedia.org/wiki/Histogram#Sturges'_formula\n or https://doi.org/10.1080%2F01621459.1926.10502161\n\n Parameters\n ----------\n dataset: xarray.DataSet\n Must have the `draw` dimension\n\n mult: float\n Used to scale the number of bins up or down. 
Default is 1 for Sturges' formula.\n\n Returns\n -------\n int\n Number of bins to use\n \"\"\"\n return int(np.ceil(mult * np.log2(dataset.draw.size)) + 1)\n",
"path": "arviz/numeric_utils.py"
}
] | diff --git a/arviz/numeric_utils.py b/arviz/numeric_utils.py
index 6731265a86..c849b251d0 100644
--- a/arviz/numeric_utils.py
+++ b/arviz/numeric_utils.py
@@ -124,7 +124,7 @@ def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
scotts_factor = len_x ** (-1 / 6)
cov = _cov(xyi)
- std_devs = np.diag(cov ** 0.5)
+ std_devs = np.diag(cov) ** 0.5
kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
|
encode__uvicorn-1534 | Latest version breaks something in the h11 implementation
I'm using uvicorn to run a FastAPI application. I just downloaded the latest uvicorn version 0.18.0 and got the following error when trying to run my app:
```
Fatal error: protocol.data_received() call failed.
protocol: <uvicorn.protocols.http.h11_impl.H11Protocol object at 0x7fcbc87089a0>
transport: <_SelectorSocketTransport fd=11 read=polling write=<idle, bufsize=0>>
Traceback (most recent call last):
File "/.../python3.9/asyncio/selector_events.py", line 870, in _read_ready__data_received
self._protocol.data_received(data)
File "/.../venv/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py", line 161, in data_received
self.handle_events()
File "/.../venv/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py", line 166, in handle_events
event = self.conn.next_event()
File "/.../venv/lib/python3.9/site-packages/h11/_connection.py", line 471, in next_event
if len(self._receive_buffer) > self._max_incomplete_event_size:
TypeError: '>' not supported between instances of 'int' and 'NoneType'
```
(I removed the full paths)
When going back to version 0.17.0, I don't encounter this error...
Please let me know if there are more details I can share.
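A rough sketch of the failure path, using h11's public API with illustrative request bytes (this is not uvicorn's own code): the new `--h11-max-incomplete-event-size` option defaults to `None`, that value is passed through to `h11.Connection`, and h11's buffer-size guard then compares an `int` against `None`.
```python
import h11

# uvicorn 0.18.0's CLI leaves the option at None and passes it through,
# instead of falling back to h11's own integer default.
conn = h11.Connection(h11.SERVER, max_incomplete_event_size=None)

# An incomplete request makes h11 check how much of the unfinished event
# it has buffered, which is where the int-vs-None comparison fails.
conn.receive_data(b"GET / HTTP/1.1\r\nHost: example.com\r\n")
conn.next_event()
# TypeError: '>' not supported between instances of 'int' and 'NoneType'
```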
| [
{
"content": "import logging\nimport os\nimport platform\nimport ssl\nimport sys\nimport typing\n\nimport click\nfrom h11._connection import DEFAULT_MAX_INCOMPLETE_EVENT_SIZE\n\nimport uvicorn\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n INTERFACES,\n LIFESPAN,\n LOG_LEVELS,\n LOGGING_CONFIG,\n LOOP_SETUPS,\n SSL_PROTOCOL_VERSION,\n WS_PROTOCOLS,\n Config,\n HTTPProtocolType,\n InterfaceType,\n LifespanType,\n LoopSetupType,\n WSProtocolType,\n)\nfrom uvicorn.server import Server, ServerState # noqa: F401 # Used to be defined here.\nfrom uvicorn.supervisors import ChangeReload, Multiprocess\n\nif typing.TYPE_CHECKING:\n from asgiref.typing import ASGIApplication\n\nLEVEL_CHOICES = click.Choice(list(LOG_LEVELS.keys()))\nHTTP_CHOICES = click.Choice(list(HTTP_PROTOCOLS.keys()))\nWS_CHOICES = click.Choice(list(WS_PROTOCOLS.keys()))\nLIFESPAN_CHOICES = click.Choice(list(LIFESPAN.keys()))\nLOOP_CHOICES = click.Choice([key for key in LOOP_SETUPS.keys() if key != \"none\"])\nINTERFACE_CHOICES = click.Choice(INTERFACES)\n\nSTARTUP_FAILURE = 3\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef print_version(ctx: click.Context, param: click.Parameter, value: bool) -> None:\n if not value or ctx.resilient_parsing:\n return\n click.echo(\n \"Running uvicorn %s with %s %s on %s\"\n % (\n uvicorn.__version__,\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )\n )\n ctx.exit()\n\n\[email protected](context_settings={\"auto_envvar_prefix\": \"UVICORN\"})\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\", hidden=True\n)\[email protected](\"--reload\", is_flag=True, default=False, help=\"Enable auto-reload.\")\[email protected](\n \"--reload-dir\",\n \"reload_dirs\",\n multiple=True,\n help=\"Set reload directories explicitly, instead of using the current working\"\n \" directory.\",\n type=click.Path(exists=True),\n)\[email protected](\n \"--reload-include\",\n \"reload_includes\",\n multiple=True,\n help=\"Set glob patterns to include while watching for files. Includes '*.py' \"\n \"by default; these defaults can be overridden with `--reload-exclude`. \"\n \"This option has no effect unless watchfiles is installed.\",\n)\[email protected](\n \"--reload-exclude\",\n \"reload_excludes\",\n multiple=True,\n help=\"Set glob patterns to exclude while watching for files. Includes \"\n \"'.*, .py[cod], .sw.*, ~*' by default; these defaults can be overridden \"\n \"with `--reload-include`. This option has no effect unless watchfiles is \"\n \"installed.\",\n)\[email protected](\n \"--reload-delay\",\n type=float,\n default=0.25,\n show_default=True,\n help=\"Delay between previous and next check if application needs to be.\"\n \" Defaults to 0.25s.\",\n)\[email protected](\n \"--workers\",\n default=None,\n type=int,\n help=\"Number of worker processes. Defaults to the $WEB_CONCURRENCY environment\"\n \" variable if available, or 1. 
Not valid with --reload.\",\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws-max-size\",\n type=int,\n default=16777216,\n help=\"WebSocket max size message in bytes\",\n show_default=True,\n)\[email protected](\n \"--ws-ping-interval\",\n type=float,\n default=20.0,\n help=\"WebSocket ping interval\",\n show_default=True,\n)\[email protected](\n \"--ws-ping-timeout\",\n type=float,\n default=20.0,\n help=\"WebSocket ping timeout\",\n show_default=True,\n)\[email protected](\n \"--ws-per-message-deflate\",\n type=bool,\n default=True,\n help=\"WebSocket per-message-deflate compression\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--interface\",\n type=INTERFACE_CHOICES,\n default=\"auto\",\n help=\"Select ASGI3, ASGI2, or WSGI as the application interface.\",\n show_default=True,\n)\[email protected](\n \"--env-file\",\n type=click.Path(exists=True),\n default=None,\n help=\"Environment configuration file.\",\n show_default=True,\n)\[email protected](\n \"--log-config\",\n type=click.Path(exists=True),\n default=None,\n help=\"Logging configuration file. Supported formats: .ini, .json, .yaml.\",\n show_default=True,\n)\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=None,\n help=\"Log level. [default: info]\",\n show_default=True,\n)\[email protected](\n \"--access-log/--no-access-log\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable access log.\",\n)\[email protected](\n \"--use-colors/--no-use-colors\",\n is_flag=True,\n default=None,\n help=\"Enable/Disable colorized logging.\",\n)\[email protected](\n \"--proxy-headers/--no-proxy-headers\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to \"\n \"populate remote address info.\",\n)\[email protected](\n \"--server-header/--no-server-header\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable default Server header.\",\n)\[email protected](\n \"--date-header/--no-date-header\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable default Date header.\",\n)\[email protected](\n \"--forwarded-allow-ips\",\n type=str,\n default=None,\n help=\"Comma separated list of IPs to trust with proxy headers. 
Defaults to\"\n \" the $FORWARDED_ALLOW_IPS environment variable if available, or '127.0.0.1'.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing\"\n \" HTTP 503 responses.\",\n)\[email protected](\n \"--backlog\",\n type=int,\n default=2048,\n help=\"Maximum number of connections to hold in backlog\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile\", type=str, default=None, help=\"SSL key file\", show_default=True\n)\[email protected](\n \"--ssl-certfile\",\n type=str,\n default=None,\n help=\"SSL certificate file\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile-password\",\n type=str,\n default=None,\n help=\"SSL keyfile password\",\n show_default=True,\n)\[email protected](\n \"--ssl-version\",\n type=int,\n default=int(SSL_PROTOCOL_VERSION),\n help=\"SSL version to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-cert-reqs\",\n type=int,\n default=int(ssl.CERT_NONE),\n help=\"Whether client certificate is required (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-ca-certs\",\n type=str,\n default=None,\n help=\"CA certificates file\",\n show_default=True,\n)\[email protected](\n \"--ssl-ciphers\",\n type=str,\n default=\"TLSv1\",\n help=\"Ciphers to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--header\",\n \"headers\",\n multiple=True,\n help=\"Specify custom default HTTP response headers as a Name:Value pair\",\n)\[email protected](\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n help=\"Display the uvicorn version and exit.\",\n)\[email protected](\n \"--app-dir\",\n default=\".\",\n show_default=True,\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH.\"\n \" Defaults to the current working directory.\",\n)\[email protected](\n \"--h11-max-incomplete-event-size\",\n \"h11_max_incomplete_event_size\",\n type=int,\n default=None,\n help=\"For h11, the maximum number of bytes to buffer of an incomplete event.\",\n)\[email protected](\n \"--factory\",\n is_flag=True,\n default=False,\n help=\"Treat APP as an application factory, i.e. 
a () -> <ASGI app> callable.\",\n show_default=True,\n)\ndef main(\n app: str,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: LoopSetupType,\n http: HTTPProtocolType,\n ws: WSProtocolType,\n ws_max_size: int,\n ws_ping_interval: float,\n ws_ping_timeout: float,\n ws_per_message_deflate: bool,\n lifespan: LifespanType,\n interface: InterfaceType,\n debug: bool,\n reload: bool,\n reload_dirs: typing.List[str],\n reload_includes: typing.List[str],\n reload_excludes: typing.List[str],\n reload_delay: float,\n workers: int,\n env_file: str,\n log_config: str,\n log_level: str,\n access_log: bool,\n proxy_headers: bool,\n server_header: bool,\n date_header: bool,\n forwarded_allow_ips: str,\n root_path: str,\n limit_concurrency: int,\n backlog: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n ssl_keyfile: str,\n ssl_certfile: str,\n ssl_keyfile_password: str,\n ssl_version: int,\n ssl_cert_reqs: int,\n ssl_ca_certs: str,\n ssl_ciphers: str,\n headers: typing.List[str],\n use_colors: bool,\n app_dir: str,\n h11_max_incomplete_event_size: int,\n factory: bool,\n) -> None:\n run(\n app,\n host=host,\n port=port,\n uds=uds,\n fd=fd,\n loop=loop,\n http=http,\n ws=ws,\n ws_max_size=ws_max_size,\n ws_ping_interval=ws_ping_interval,\n ws_ping_timeout=ws_ping_timeout,\n ws_per_message_deflate=ws_per_message_deflate,\n lifespan=lifespan,\n env_file=env_file,\n log_config=LOGGING_CONFIG if log_config is None else log_config,\n log_level=log_level,\n access_log=access_log,\n interface=interface,\n debug=debug,\n reload=reload,\n reload_dirs=reload_dirs if reload_dirs else None,\n reload_includes=reload_includes if reload_includes else None,\n reload_excludes=reload_excludes if reload_excludes else None,\n reload_delay=reload_delay,\n workers=workers,\n proxy_headers=proxy_headers,\n server_header=server_header,\n date_header=date_header,\n forwarded_allow_ips=forwarded_allow_ips,\n root_path=root_path,\n limit_concurrency=limit_concurrency,\n backlog=backlog,\n limit_max_requests=limit_max_requests,\n timeout_keep_alive=timeout_keep_alive,\n ssl_keyfile=ssl_keyfile,\n ssl_certfile=ssl_certfile,\n ssl_keyfile_password=ssl_keyfile_password,\n ssl_version=ssl_version,\n ssl_cert_reqs=ssl_cert_reqs,\n ssl_ca_certs=ssl_ca_certs,\n ssl_ciphers=ssl_ciphers,\n headers=[header.split(\":\", 1) for header in headers], # type: ignore[misc]\n use_colors=use_colors,\n factory=factory,\n app_dir=app_dir,\n h11_max_incomplete_event_size=h11_max_incomplete_event_size,\n )\n\n\ndef run(\n app: typing.Union[\"ASGIApplication\", str],\n *,\n host: str = \"127.0.0.1\",\n port: int = 8000,\n uds: typing.Optional[str] = None,\n fd: typing.Optional[int] = None,\n loop: LoopSetupType = \"auto\",\n http: HTTPProtocolType = \"auto\",\n ws: WSProtocolType = \"auto\",\n ws_max_size: int = 16777216,\n ws_ping_interval: float = 20.0,\n ws_ping_timeout: float = 20.0,\n ws_per_message_deflate: bool = True,\n lifespan: LifespanType = \"auto\",\n interface: InterfaceType = \"auto\",\n debug: bool = False,\n reload: bool = False,\n reload_dirs: typing.Optional[typing.List[str]] = None,\n reload_includes: typing.Optional[typing.List[str]] = None,\n reload_excludes: typing.Optional[typing.List[str]] = None,\n reload_delay: float = 0.25,\n workers: typing.Optional[int] = None,\n env_file: typing.Optional[str] = None,\n log_config: typing.Optional[typing.Union[dict, str]] = None,\n log_level: typing.Optional[str] = None,\n access_log: bool = True,\n proxy_headers: bool = True,\n server_header: bool = True,\n date_header: 
bool = True,\n forwarded_allow_ips: typing.Optional[str] = None,\n root_path: str = \"\",\n limit_concurrency: typing.Optional[int] = None,\n backlog: int = 2048,\n limit_max_requests: typing.Optional[int] = None,\n timeout_keep_alive: int = 5,\n ssl_keyfile: typing.Optional[str] = None,\n ssl_certfile: typing.Optional[str] = None,\n ssl_keyfile_password: typing.Optional[str] = None,\n ssl_version: int = int(SSL_PROTOCOL_VERSION),\n ssl_cert_reqs: int = int(ssl.CERT_NONE),\n ssl_ca_certs: typing.Optional[str] = None,\n ssl_ciphers: str = \"TLSv1\",\n headers: typing.Optional[typing.List[typing.Tuple[str, str]]] = None,\n use_colors: typing.Optional[bool] = None,\n app_dir: typing.Optional[str] = None,\n factory: bool = False,\n h11_max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,\n) -> None:\n if app_dir is not None:\n sys.path.insert(0, app_dir)\n\n config = Config(\n app,\n host=host,\n port=port,\n uds=uds,\n fd=fd,\n loop=loop,\n http=http,\n ws=ws,\n ws_max_size=ws_max_size,\n ws_ping_interval=ws_ping_interval,\n ws_ping_timeout=ws_ping_timeout,\n ws_per_message_deflate=ws_per_message_deflate,\n lifespan=lifespan,\n interface=interface,\n debug=debug,\n reload=reload,\n reload_dirs=reload_dirs,\n reload_includes=reload_includes,\n reload_excludes=reload_excludes,\n reload_delay=reload_delay,\n workers=workers,\n env_file=env_file,\n log_config=log_config,\n log_level=log_level,\n access_log=access_log,\n proxy_headers=proxy_headers,\n server_header=server_header,\n date_header=date_header,\n forwarded_allow_ips=forwarded_allow_ips,\n root_path=root_path,\n limit_concurrency=limit_concurrency,\n backlog=backlog,\n limit_max_requests=limit_max_requests,\n timeout_keep_alive=timeout_keep_alive,\n ssl_keyfile=ssl_keyfile,\n ssl_certfile=ssl_certfile,\n ssl_keyfile_password=ssl_keyfile_password,\n ssl_version=ssl_version,\n ssl_cert_reqs=ssl_cert_reqs,\n ssl_ca_certs=ssl_ca_certs,\n ssl_ciphers=ssl_ciphers,\n headers=headers,\n use_colors=use_colors,\n factory=factory,\n h11_max_incomplete_event_size=h11_max_incomplete_event_size,\n )\n server = Server(config=config)\n\n if (config.reload or config.workers > 1) and not isinstance(app, str):\n logger = logging.getLogger(\"uvicorn.error\")\n logger.warning(\n \"You must pass the application as an import string to enable 'reload' or \"\n \"'workers'.\"\n )\n sys.exit(1)\n\n if config.should_reload:\n sock = config.bind_socket()\n ChangeReload(config, target=server.run, sockets=[sock]).run()\n elif config.workers > 1:\n sock = config.bind_socket()\n Multiprocess(config, target=server.run, sockets=[sock]).run()\n else:\n server.run()\n if config.uds:\n os.remove(config.uds) # pragma: py-win32\n\n if not server.started and not config.should_reload and config.workers == 1:\n sys.exit(STARTUP_FAILURE)\n\n\nif __name__ == \"__main__\":\n main() # pragma: no cover\n",
"path": "uvicorn/main.py"
}
] | [
{
"content": "import logging\nimport os\nimport platform\nimport ssl\nimport sys\nimport typing\n\nimport click\nfrom h11._connection import DEFAULT_MAX_INCOMPLETE_EVENT_SIZE\n\nimport uvicorn\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n INTERFACES,\n LIFESPAN,\n LOG_LEVELS,\n LOGGING_CONFIG,\n LOOP_SETUPS,\n SSL_PROTOCOL_VERSION,\n WS_PROTOCOLS,\n Config,\n HTTPProtocolType,\n InterfaceType,\n LifespanType,\n LoopSetupType,\n WSProtocolType,\n)\nfrom uvicorn.server import Server, ServerState # noqa: F401 # Used to be defined here.\nfrom uvicorn.supervisors import ChangeReload, Multiprocess\n\nif typing.TYPE_CHECKING:\n from asgiref.typing import ASGIApplication\n\nLEVEL_CHOICES = click.Choice(list(LOG_LEVELS.keys()))\nHTTP_CHOICES = click.Choice(list(HTTP_PROTOCOLS.keys()))\nWS_CHOICES = click.Choice(list(WS_PROTOCOLS.keys()))\nLIFESPAN_CHOICES = click.Choice(list(LIFESPAN.keys()))\nLOOP_CHOICES = click.Choice([key for key in LOOP_SETUPS.keys() if key != \"none\"])\nINTERFACE_CHOICES = click.Choice(INTERFACES)\n\nSTARTUP_FAILURE = 3\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef print_version(ctx: click.Context, param: click.Parameter, value: bool) -> None:\n if not value or ctx.resilient_parsing:\n return\n click.echo(\n \"Running uvicorn %s with %s %s on %s\"\n % (\n uvicorn.__version__,\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )\n )\n ctx.exit()\n\n\[email protected](context_settings={\"auto_envvar_prefix\": \"UVICORN\"})\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\", hidden=True\n)\[email protected](\"--reload\", is_flag=True, default=False, help=\"Enable auto-reload.\")\[email protected](\n \"--reload-dir\",\n \"reload_dirs\",\n multiple=True,\n help=\"Set reload directories explicitly, instead of using the current working\"\n \" directory.\",\n type=click.Path(exists=True),\n)\[email protected](\n \"--reload-include\",\n \"reload_includes\",\n multiple=True,\n help=\"Set glob patterns to include while watching for files. Includes '*.py' \"\n \"by default; these defaults can be overridden with `--reload-exclude`. \"\n \"This option has no effect unless watchfiles is installed.\",\n)\[email protected](\n \"--reload-exclude\",\n \"reload_excludes\",\n multiple=True,\n help=\"Set glob patterns to exclude while watching for files. Includes \"\n \"'.*, .py[cod], .sw.*, ~*' by default; these defaults can be overridden \"\n \"with `--reload-include`. This option has no effect unless watchfiles is \"\n \"installed.\",\n)\[email protected](\n \"--reload-delay\",\n type=float,\n default=0.25,\n show_default=True,\n help=\"Delay between previous and next check if application needs to be.\"\n \" Defaults to 0.25s.\",\n)\[email protected](\n \"--workers\",\n default=None,\n type=int,\n help=\"Number of worker processes. Defaults to the $WEB_CONCURRENCY environment\"\n \" variable if available, or 1. 
Not valid with --reload.\",\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws-max-size\",\n type=int,\n default=16777216,\n help=\"WebSocket max size message in bytes\",\n show_default=True,\n)\[email protected](\n \"--ws-ping-interval\",\n type=float,\n default=20.0,\n help=\"WebSocket ping interval\",\n show_default=True,\n)\[email protected](\n \"--ws-ping-timeout\",\n type=float,\n default=20.0,\n help=\"WebSocket ping timeout\",\n show_default=True,\n)\[email protected](\n \"--ws-per-message-deflate\",\n type=bool,\n default=True,\n help=\"WebSocket per-message-deflate compression\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--interface\",\n type=INTERFACE_CHOICES,\n default=\"auto\",\n help=\"Select ASGI3, ASGI2, or WSGI as the application interface.\",\n show_default=True,\n)\[email protected](\n \"--env-file\",\n type=click.Path(exists=True),\n default=None,\n help=\"Environment configuration file.\",\n show_default=True,\n)\[email protected](\n \"--log-config\",\n type=click.Path(exists=True),\n default=None,\n help=\"Logging configuration file. Supported formats: .ini, .json, .yaml.\",\n show_default=True,\n)\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=None,\n help=\"Log level. [default: info]\",\n show_default=True,\n)\[email protected](\n \"--access-log/--no-access-log\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable access log.\",\n)\[email protected](\n \"--use-colors/--no-use-colors\",\n is_flag=True,\n default=None,\n help=\"Enable/Disable colorized logging.\",\n)\[email protected](\n \"--proxy-headers/--no-proxy-headers\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to \"\n \"populate remote address info.\",\n)\[email protected](\n \"--server-header/--no-server-header\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable default Server header.\",\n)\[email protected](\n \"--date-header/--no-date-header\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable default Date header.\",\n)\[email protected](\n \"--forwarded-allow-ips\",\n type=str,\n default=None,\n help=\"Comma separated list of IPs to trust with proxy headers. 
Defaults to\"\n \" the $FORWARDED_ALLOW_IPS environment variable if available, or '127.0.0.1'.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing\"\n \" HTTP 503 responses.\",\n)\[email protected](\n \"--backlog\",\n type=int,\n default=2048,\n help=\"Maximum number of connections to hold in backlog\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile\", type=str, default=None, help=\"SSL key file\", show_default=True\n)\[email protected](\n \"--ssl-certfile\",\n type=str,\n default=None,\n help=\"SSL certificate file\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile-password\",\n type=str,\n default=None,\n help=\"SSL keyfile password\",\n show_default=True,\n)\[email protected](\n \"--ssl-version\",\n type=int,\n default=int(SSL_PROTOCOL_VERSION),\n help=\"SSL version to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-cert-reqs\",\n type=int,\n default=int(ssl.CERT_NONE),\n help=\"Whether client certificate is required (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-ca-certs\",\n type=str,\n default=None,\n help=\"CA certificates file\",\n show_default=True,\n)\[email protected](\n \"--ssl-ciphers\",\n type=str,\n default=\"TLSv1\",\n help=\"Ciphers to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--header\",\n \"headers\",\n multiple=True,\n help=\"Specify custom default HTTP response headers as a Name:Value pair\",\n)\[email protected](\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n help=\"Display the uvicorn version and exit.\",\n)\[email protected](\n \"--app-dir\",\n default=\".\",\n show_default=True,\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH.\"\n \" Defaults to the current working directory.\",\n)\[email protected](\n \"--h11-max-incomplete-event-size\",\n \"h11_max_incomplete_event_size\",\n type=int,\n default=DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,\n help=\"For h11, the maximum number of bytes to buffer of an incomplete event.\",\n)\[email protected](\n \"--factory\",\n is_flag=True,\n default=False,\n help=\"Treat APP as an application factory, i.e. 
a () -> <ASGI app> callable.\",\n show_default=True,\n)\ndef main(\n app: str,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: LoopSetupType,\n http: HTTPProtocolType,\n ws: WSProtocolType,\n ws_max_size: int,\n ws_ping_interval: float,\n ws_ping_timeout: float,\n ws_per_message_deflate: bool,\n lifespan: LifespanType,\n interface: InterfaceType,\n debug: bool,\n reload: bool,\n reload_dirs: typing.List[str],\n reload_includes: typing.List[str],\n reload_excludes: typing.List[str],\n reload_delay: float,\n workers: int,\n env_file: str,\n log_config: str,\n log_level: str,\n access_log: bool,\n proxy_headers: bool,\n server_header: bool,\n date_header: bool,\n forwarded_allow_ips: str,\n root_path: str,\n limit_concurrency: int,\n backlog: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n ssl_keyfile: str,\n ssl_certfile: str,\n ssl_keyfile_password: str,\n ssl_version: int,\n ssl_cert_reqs: int,\n ssl_ca_certs: str,\n ssl_ciphers: str,\n headers: typing.List[str],\n use_colors: bool,\n app_dir: str,\n h11_max_incomplete_event_size: int,\n factory: bool,\n) -> None:\n run(\n app,\n host=host,\n port=port,\n uds=uds,\n fd=fd,\n loop=loop,\n http=http,\n ws=ws,\n ws_max_size=ws_max_size,\n ws_ping_interval=ws_ping_interval,\n ws_ping_timeout=ws_ping_timeout,\n ws_per_message_deflate=ws_per_message_deflate,\n lifespan=lifespan,\n env_file=env_file,\n log_config=LOGGING_CONFIG if log_config is None else log_config,\n log_level=log_level,\n access_log=access_log,\n interface=interface,\n debug=debug,\n reload=reload,\n reload_dirs=reload_dirs if reload_dirs else None,\n reload_includes=reload_includes if reload_includes else None,\n reload_excludes=reload_excludes if reload_excludes else None,\n reload_delay=reload_delay,\n workers=workers,\n proxy_headers=proxy_headers,\n server_header=server_header,\n date_header=date_header,\n forwarded_allow_ips=forwarded_allow_ips,\n root_path=root_path,\n limit_concurrency=limit_concurrency,\n backlog=backlog,\n limit_max_requests=limit_max_requests,\n timeout_keep_alive=timeout_keep_alive,\n ssl_keyfile=ssl_keyfile,\n ssl_certfile=ssl_certfile,\n ssl_keyfile_password=ssl_keyfile_password,\n ssl_version=ssl_version,\n ssl_cert_reqs=ssl_cert_reqs,\n ssl_ca_certs=ssl_ca_certs,\n ssl_ciphers=ssl_ciphers,\n headers=[header.split(\":\", 1) for header in headers], # type: ignore[misc]\n use_colors=use_colors,\n factory=factory,\n app_dir=app_dir,\n h11_max_incomplete_event_size=h11_max_incomplete_event_size,\n )\n\n\ndef run(\n app: typing.Union[\"ASGIApplication\", str],\n *,\n host: str = \"127.0.0.1\",\n port: int = 8000,\n uds: typing.Optional[str] = None,\n fd: typing.Optional[int] = None,\n loop: LoopSetupType = \"auto\",\n http: HTTPProtocolType = \"auto\",\n ws: WSProtocolType = \"auto\",\n ws_max_size: int = 16777216,\n ws_ping_interval: float = 20.0,\n ws_ping_timeout: float = 20.0,\n ws_per_message_deflate: bool = True,\n lifespan: LifespanType = \"auto\",\n interface: InterfaceType = \"auto\",\n debug: bool = False,\n reload: bool = False,\n reload_dirs: typing.Optional[typing.List[str]] = None,\n reload_includes: typing.Optional[typing.List[str]] = None,\n reload_excludes: typing.Optional[typing.List[str]] = None,\n reload_delay: float = 0.25,\n workers: typing.Optional[int] = None,\n env_file: typing.Optional[str] = None,\n log_config: typing.Optional[typing.Union[dict, str]] = None,\n log_level: typing.Optional[str] = None,\n access_log: bool = True,\n proxy_headers: bool = True,\n server_header: bool = True,\n date_header: 
bool = True,\n forwarded_allow_ips: typing.Optional[str] = None,\n root_path: str = \"\",\n limit_concurrency: typing.Optional[int] = None,\n backlog: int = 2048,\n limit_max_requests: typing.Optional[int] = None,\n timeout_keep_alive: int = 5,\n ssl_keyfile: typing.Optional[str] = None,\n ssl_certfile: typing.Optional[str] = None,\n ssl_keyfile_password: typing.Optional[str] = None,\n ssl_version: int = int(SSL_PROTOCOL_VERSION),\n ssl_cert_reqs: int = int(ssl.CERT_NONE),\n ssl_ca_certs: typing.Optional[str] = None,\n ssl_ciphers: str = \"TLSv1\",\n headers: typing.Optional[typing.List[typing.Tuple[str, str]]] = None,\n use_colors: typing.Optional[bool] = None,\n app_dir: typing.Optional[str] = None,\n factory: bool = False,\n h11_max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,\n) -> None:\n if app_dir is not None:\n sys.path.insert(0, app_dir)\n\n config = Config(\n app,\n host=host,\n port=port,\n uds=uds,\n fd=fd,\n loop=loop,\n http=http,\n ws=ws,\n ws_max_size=ws_max_size,\n ws_ping_interval=ws_ping_interval,\n ws_ping_timeout=ws_ping_timeout,\n ws_per_message_deflate=ws_per_message_deflate,\n lifespan=lifespan,\n interface=interface,\n debug=debug,\n reload=reload,\n reload_dirs=reload_dirs,\n reload_includes=reload_includes,\n reload_excludes=reload_excludes,\n reload_delay=reload_delay,\n workers=workers,\n env_file=env_file,\n log_config=log_config,\n log_level=log_level,\n access_log=access_log,\n proxy_headers=proxy_headers,\n server_header=server_header,\n date_header=date_header,\n forwarded_allow_ips=forwarded_allow_ips,\n root_path=root_path,\n limit_concurrency=limit_concurrency,\n backlog=backlog,\n limit_max_requests=limit_max_requests,\n timeout_keep_alive=timeout_keep_alive,\n ssl_keyfile=ssl_keyfile,\n ssl_certfile=ssl_certfile,\n ssl_keyfile_password=ssl_keyfile_password,\n ssl_version=ssl_version,\n ssl_cert_reqs=ssl_cert_reqs,\n ssl_ca_certs=ssl_ca_certs,\n ssl_ciphers=ssl_ciphers,\n headers=headers,\n use_colors=use_colors,\n factory=factory,\n h11_max_incomplete_event_size=h11_max_incomplete_event_size,\n )\n server = Server(config=config)\n\n if (config.reload or config.workers > 1) and not isinstance(app, str):\n logger = logging.getLogger(\"uvicorn.error\")\n logger.warning(\n \"You must pass the application as an import string to enable 'reload' or \"\n \"'workers'.\"\n )\n sys.exit(1)\n\n if config.should_reload:\n sock = config.bind_socket()\n ChangeReload(config, target=server.run, sockets=[sock]).run()\n elif config.workers > 1:\n sock = config.bind_socket()\n Multiprocess(config, target=server.run, sockets=[sock]).run()\n else:\n server.run()\n if config.uds:\n os.remove(config.uds) # pragma: py-win32\n\n if not server.started and not config.should_reload and config.workers == 1:\n sys.exit(STARTUP_FAILURE)\n\n\nif __name__ == \"__main__\":\n main() # pragma: no cover\n",
"path": "uvicorn/main.py"
}
] | diff --git a/uvicorn/main.py b/uvicorn/main.py
index ac6096722..58e01305a 100644
--- a/uvicorn/main.py
+++ b/uvicorn/main.py
@@ -346,7 +346,7 @@ def print_version(ctx: click.Context, param: click.Parameter, value: bool) -> No
"--h11-max-incomplete-event-size",
"h11_max_incomplete_event_size",
type=int,
- default=None,
+ default=DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
help="For h11, the maximum number of bytes to buffer of an incomplete event.",
)
@click.option(
|
encode__django-rest-framework-5849 | RelatedField (and their subclasses) do not support traversing relationships that can be null
This was introduced by #5518, and while the solutions there work for normal fields, RelatedField behaves differently:
https://github.com/encode/django-rest-framework/blob/da535d31dd93dbb1d650e2e92bd0910ca8eb4ea4/rest_framework/fields.py#L440-L442
vs
https://github.com/encode/django-rest-framework/blob/da535d31dd93dbb1d650e2e92bd0910ca8eb4ea4/rest_framework/relations.py#L177
An example of the problem can be reproduced with https://gist.github.com/gcbirzan/a968facbaf0969f4a9616942de7022dc
A `Model1` instance with `None` for `model2` will produce an exception.
I believe that the correct behaviour is for `RelatedField.get_attribute` to call super.
As a side note, this is very hacky to work around: you'll need to copy-paste the code from `RelatedField.get_attribute` into your class and then call `Field.get_attribute`, since obviously `super()` won't work.
If there's some agreement that this is the solution, I can fix it, but I'm not 100% sure (given the reaction on the original ticket) if this is considered a bug.
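A rough sketch of the direction described above, as an interim subclass (hypothetical name, not an actual DRF patch): keep the pk-only fast path, but make the standard case defer to `Field.get_attribute`, which per the report already copes with a null relation inside `source` for normal fields.
```python
from rest_framework import serializers
from rest_framework.fields import get_attribute, is_simple_callable
from rest_framework.relations import PKOnlyObject


class NullSafePrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
    def get_attribute(self, instance):
        if self.use_pk_only_optimization() and self.source_attrs:
            try:
                # Same optimized path as RelatedField.get_attribute.
                attr = get_attribute(instance, self.source_attrs[:-1])
                value = attr.serializable_value(self.source_attrs[-1])
                if is_simple_callable(value):
                    value = value().pk
                return PKOnlyObject(pk=value)
            except AttributeError:
                pass
        # Skip RelatedField.get_attribute and call Field's version directly,
        # so a null intermediate relation gets the normal default /
        # allow_null / SkipField handling instead of raising.
        return serializers.Field.get_attribute(self, instance)
```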
| [
{
"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom collections import OrderedDict\n\nfrom django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\nfrom django.db.models import Manager\nfrom django.db.models.query import QuerySet\nfrom django.urls import NoReverseMatch, Resolver404, get_script_prefix, resolve\nfrom django.utils import six\nfrom django.utils.encoding import (\n python_2_unicode_compatible, smart_text, uri_to_iri\n)\nfrom django.utils.six.moves.urllib import parse as urlparse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework.fields import (\n Field, empty, get_attribute, is_simple_callable, iter_options\n)\nfrom rest_framework.reverse import reverse\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import html\n\n\ndef method_overridden(method_name, klass, instance):\n \"\"\"\n Determine if a method has been overridden.\n \"\"\"\n method = getattr(klass, method_name)\n default_method = getattr(method, '__func__', method) # Python 3 compat\n return default_method is not getattr(instance, method_name).__func__\n\n\nclass Hyperlink(six.text_type):\n \"\"\"\n A string like object that additionally has an associated name.\n We use this for hyperlinked URLs that may render as a named link\n in some contexts, or render as a plain URL in others.\n \"\"\"\n def __new__(self, url, obj):\n ret = six.text_type.__new__(self, url)\n ret.obj = obj\n return ret\n\n def __getnewargs__(self):\n return(str(self), self.name,)\n\n @property\n def name(self):\n # This ensures that we only called `__str__` lazily,\n # as in some cases calling __str__ on a model instances *might*\n # involve a database lookup.\n return six.text_type(self.obj)\n\n is_hyperlink = True\n\n\n@python_2_unicode_compatible\nclass PKOnlyObject(object):\n \"\"\"\n This is a mock object, used for when we only need the pk of the object\n instance, but still want to return an object with a .pk attribute,\n in order to keep the same interface as a regular model instance.\n \"\"\"\n def __init__(self, pk):\n self.pk = pk\n\n def __str__(self):\n return \"%s\" % self.pk\n\n\n# We assume that 'validators' are intended for the child serializer,\n# rather than the parent serializer.\nMANY_RELATION_KWARGS = (\n 'read_only', 'write_only', 'required', 'default', 'initial', 'source',\n 'label', 'help_text', 'style', 'error_messages', 'allow_empty',\n 'html_cutoff', 'html_cutoff_text'\n)\n\n\nclass RelatedField(Field):\n queryset = None\n html_cutoff = None\n html_cutoff_text = None\n\n def __init__(self, **kwargs):\n self.queryset = kwargs.pop('queryset', self.queryset)\n\n cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF\n if cutoff_from_settings is not None:\n cutoff_from_settings = int(cutoff_from_settings)\n self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)\n\n self.html_cutoff_text = kwargs.pop(\n 'html_cutoff_text',\n self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)\n )\n if not method_overridden('get_queryset', RelatedField, self):\n assert self.queryset is not None or kwargs.get('read_only', None), (\n 'Relational field must provide a `queryset` argument, '\n 'override `get_queryset`, or set read_only=`True`.'\n )\n assert not (self.queryset is not None and kwargs.get('read_only', None)), (\n 'Relational fields should not provide a `queryset` argument, '\n 'when setting read_only=`True`.'\n )\n kwargs.pop('many', None)\n kwargs.pop('allow_empty', None)\n super(RelatedField, 
self).__init__(**kwargs)\n\n def __new__(cls, *args, **kwargs):\n # We override this method in order to automagically create\n # `ManyRelatedField` classes instead when `many=True` is set.\n if kwargs.pop('many', False):\n return cls.many_init(*args, **kwargs)\n return super(RelatedField, cls).__new__(cls, *args, **kwargs)\n\n @classmethod\n def many_init(cls, *args, **kwargs):\n \"\"\"\n This method handles creating a parent `ManyRelatedField` instance\n when the `many=True` keyword argument is passed.\n\n Typically you won't need to override this method.\n\n Note that we're over-cautious in passing most arguments to both parent\n and child classes in order to try to cover the general case. If you're\n overriding this method you'll probably want something much simpler, eg:\n\n @classmethod\n def many_init(cls, *args, **kwargs):\n kwargs['child'] = cls()\n return CustomManyRelatedField(*args, **kwargs)\n \"\"\"\n list_kwargs = {'child_relation': cls(*args, **kwargs)}\n for key in kwargs:\n if key in MANY_RELATION_KWARGS:\n list_kwargs[key] = kwargs[key]\n return ManyRelatedField(**list_kwargs)\n\n def run_validation(self, data=empty):\n # We force empty strings to None values for relational fields.\n if data == '':\n data = None\n return super(RelatedField, self).run_validation(data)\n\n def get_queryset(self):\n queryset = self.queryset\n if isinstance(queryset, (QuerySet, Manager)):\n # Ensure queryset is re-evaluated whenever used.\n # Note that actually a `Manager` class may also be used as the\n # queryset argument. This occurs on ModelSerializer fields,\n # as it allows us to generate a more expressive 'repr' output\n # for the field.\n # Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'\n queryset = queryset.all()\n return queryset\n\n def use_pk_only_optimization(self):\n return False\n\n def get_attribute(self, instance):\n if self.use_pk_only_optimization() and self.source_attrs:\n # Optimized case, return a mock object only containing the pk attribute.\n try:\n instance = get_attribute(instance, self.source_attrs[:-1])\n value = instance.serializable_value(self.source_attrs[-1])\n if is_simple_callable(value):\n # Handle edge case where the relationship `source` argument\n # points to a `get_relationship()` method on the model\n value = value().pk\n return PKOnlyObject(pk=value)\n except AttributeError:\n pass\n\n # Standard case, return the object instance.\n return get_attribute(instance, self.source_attrs)\n\n def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict([\n (\n self.to_representation(item),\n self.display_value(item)\n )\n for item in queryset\n ])\n\n @property\n def choices(self):\n return self.get_choices()\n\n @property\n def grouped_choices(self):\n return self.choices\n\n def iter_options(self):\n return iter_options(\n self.get_choices(cutoff=self.html_cutoff),\n cutoff=self.html_cutoff,\n cutoff_text=self.html_cutoff_text\n )\n\n def display_value(self, instance):\n return six.text_type(instance)\n\n\nclass StringRelatedField(RelatedField):\n \"\"\"\n A read only field that represents its targets using their\n plain string representation.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['read_only'] = True\n super(StringRelatedField, self).__init__(**kwargs)\n\n def to_representation(self, value):\n return 
six.text_type(value)\n\n\nclass PrimaryKeyRelatedField(RelatedField):\n default_error_messages = {\n 'required': _('This field is required.'),\n 'does_not_exist': _('Invalid pk \"{pk_value}\" - object does not exist.'),\n 'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),\n }\n\n def __init__(self, **kwargs):\n self.pk_field = kwargs.pop('pk_field', None)\n super(PrimaryKeyRelatedField, self).__init__(**kwargs)\n\n def use_pk_only_optimization(self):\n return True\n\n def to_internal_value(self, data):\n if self.pk_field is not None:\n data = self.pk_field.to_internal_value(data)\n try:\n return self.get_queryset().get(pk=data)\n except ObjectDoesNotExist:\n self.fail('does_not_exist', pk_value=data)\n except (TypeError, ValueError):\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n def to_representation(self, value):\n if self.pk_field is not None:\n return self.pk_field.to_representation(value.pk)\n return value.pk\n\n\nclass HyperlinkedRelatedField(RelatedField):\n lookup_field = 'pk'\n view_name = None\n\n default_error_messages = {\n 'required': _('This field is required.'),\n 'no_match': _('Invalid hyperlink - No URL match.'),\n 'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),\n 'does_not_exist': _('Invalid hyperlink - Object does not exist.'),\n 'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),\n }\n\n def __init__(self, view_name=None, **kwargs):\n if view_name is not None:\n self.view_name = view_name\n assert self.view_name is not None, 'The `view_name` argument is required.'\n self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)\n self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)\n self.format = kwargs.pop('format', None)\n\n # We include this simply for dependency injection in tests.\n # We can't add it as a class attributes or it would expect an\n # implicit `self` argument to be passed.\n self.reverse = reverse\n\n super(HyperlinkedRelatedField, self).__init__(**kwargs)\n\n def use_pk_only_optimization(self):\n return self.lookup_field == 'pk'\n\n def get_object(self, view_name, view_args, view_kwargs):\n \"\"\"\n Return the object corresponding to a matched URL.\n\n Takes the matched URL conf arguments, and should return an\n object instance, or raise an `ObjectDoesNotExist` exception.\n \"\"\"\n lookup_value = view_kwargs[self.lookup_url_kwarg]\n lookup_kwargs = {self.lookup_field: lookup_value}\n return self.get_queryset().get(**lookup_kwargs)\n\n def get_url(self, obj, view_name, request, format):\n \"\"\"\n Given an object, return the URL that hyperlinks to the object.\n\n May raise a `NoReverseMatch` if the `view_name` and `lookup_field`\n attributes are not configured to correctly match the URL conf.\n \"\"\"\n # Unsaved objects will not yet have a valid URL.\n if hasattr(obj, 'pk') and obj.pk in (None, ''):\n return None\n\n lookup_value = getattr(obj, self.lookup_field)\n kwargs = {self.lookup_url_kwarg: lookup_value}\n return self.reverse(view_name, kwargs=kwargs, request=request, format=format)\n\n def to_internal_value(self, data):\n request = self.context.get('request', None)\n try:\n http_prefix = data.startswith(('http:', 'https:'))\n except AttributeError:\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n if http_prefix:\n # If needed convert absolute URLs to relative path\n data = urlparse.urlparse(data).path\n prefix = get_script_prefix()\n if data.startswith(prefix):\n data = '/' + data[len(prefix):]\n\n data = 
uri_to_iri(data)\n\n try:\n match = resolve(data)\n except Resolver404:\n self.fail('no_match')\n\n try:\n expected_viewname = request.versioning_scheme.get_versioned_viewname(\n self.view_name, request\n )\n except AttributeError:\n expected_viewname = self.view_name\n\n if match.view_name != expected_viewname:\n self.fail('incorrect_match')\n\n try:\n return self.get_object(match.view_name, match.args, match.kwargs)\n except (ObjectDoesNotExist, TypeError, ValueError):\n self.fail('does_not_exist')\n\n def to_representation(self, value):\n assert 'request' in self.context, (\n \"`%s` requires the request in the serializer\"\n \" context. Add `context={'request': request}` when instantiating \"\n \"the serializer.\" % self.__class__.__name__\n )\n\n request = self.context['request']\n format = self.context.get('format', None)\n\n # By default use whatever format is given for the current context\n # unless the target is a different type to the source.\n #\n # Eg. Consider a HyperlinkedIdentityField pointing from a json\n # representation to an html property of that representation...\n #\n # '/snippets/1/' should link to '/snippets/1/highlight/'\n # ...but...\n # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'\n if format and self.format and self.format != format:\n format = self.format\n\n # Return the hyperlink, or error if incorrectly configured.\n try:\n url = self.get_url(value, self.view_name, request, format)\n except NoReverseMatch:\n msg = (\n 'Could not resolve URL for hyperlinked relationship using '\n 'view name \"%s\". You may have failed to include the related '\n 'model in your API, or incorrectly configured the '\n '`lookup_field` attribute on this field.'\n )\n if value in ('', None):\n value_string = {'': 'the empty string', None: 'None'}[value]\n msg += (\n \" WARNING: The value of the field on the model instance \"\n \"was %s, which may be why it didn't match any \"\n \"entries in your URL conf.\" % value_string\n )\n raise ImproperlyConfigured(msg % self.view_name)\n\n if url is None:\n return None\n\n return Hyperlink(url, value)\n\n\nclass HyperlinkedIdentityField(HyperlinkedRelatedField):\n \"\"\"\n A read-only field that represents the identity URL for an object, itself.\n\n This is in contrast to `HyperlinkedRelatedField` which represents the\n URL of relationships to other objects.\n \"\"\"\n\n def __init__(self, view_name=None, **kwargs):\n assert view_name is not None, 'The `view_name` argument is required.'\n kwargs['read_only'] = True\n kwargs['source'] = '*'\n super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)\n\n def use_pk_only_optimization(self):\n # We have the complete object instance already. 
We don't need\n # to run the 'only get the pk for this relationship' code.\n return False\n\n\nclass SlugRelatedField(RelatedField):\n \"\"\"\n A read-write field that represents the target of the relationship\n by a unique 'slug' attribute.\n \"\"\"\n default_error_messages = {\n 'does_not_exist': _('Object with {slug_name}={value} does not exist.'),\n 'invalid': _('Invalid value.'),\n }\n\n def __init__(self, slug_field=None, **kwargs):\n assert slug_field is not None, 'The `slug_field` argument is required.'\n self.slug_field = slug_field\n super(SlugRelatedField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n try:\n return self.get_queryset().get(**{self.slug_field: data})\n except ObjectDoesNotExist:\n self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))\n except (TypeError, ValueError):\n self.fail('invalid')\n\n def to_representation(self, obj):\n return getattr(obj, self.slug_field)\n\n\nclass ManyRelatedField(Field):\n \"\"\"\n Relationships with `many=True` transparently get coerced into instead being\n a ManyRelatedField with a child relationship.\n\n The `ManyRelatedField` class is responsible for handling iterating through\n the values and passing each one to the child relationship.\n\n This class is treated as private API.\n You shouldn't generally need to be using this class directly yourself,\n and should instead simply set 'many=True' on the relationship.\n \"\"\"\n initial = []\n default_empty_html = []\n default_error_messages = {\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".'),\n 'empty': _('This list may not be empty.')\n }\n html_cutoff = None\n html_cutoff_text = None\n\n def __init__(self, child_relation=None, *args, **kwargs):\n self.child_relation = child_relation\n self.allow_empty = kwargs.pop('allow_empty', True)\n\n cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF\n if cutoff_from_settings is not None:\n cutoff_from_settings = int(cutoff_from_settings)\n self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)\n\n self.html_cutoff_text = kwargs.pop(\n 'html_cutoff_text',\n self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)\n )\n assert child_relation is not None, '`child_relation` is a required argument.'\n super(ManyRelatedField, self).__init__(*args, **kwargs)\n self.child_relation.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n # Don't return [] if the update is partial\n if self.field_name not in dictionary:\n if getattr(self.root, 'partial', False):\n return empty\n return dictionary.getlist(self.field_name)\n\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n if not self.allow_empty and len(data) == 0:\n self.fail('empty')\n\n return [\n self.child_relation.to_internal_value(item)\n for item in data\n ]\n\n def get_attribute(self, instance):\n # Can't have any relationships if not created\n if hasattr(instance, 'pk') and instance.pk is None:\n return []\n\n relationship = get_attribute(instance, self.source_attrs)\n return relationship.all() if hasattr(relationship, 'all') else relationship\n\n def to_representation(self, iterable):\n return [\n self.child_relation.to_representation(value)\n for value in iterable\n ]\n\n def get_choices(self, 
cutoff=None):\n return self.child_relation.get_choices(cutoff)\n\n @property\n def choices(self):\n return self.get_choices()\n\n @property\n def grouped_choices(self):\n return self.choices\n\n def iter_options(self):\n return iter_options(\n self.get_choices(cutoff=self.html_cutoff),\n cutoff=self.html_cutoff,\n cutoff_text=self.html_cutoff_text\n )\n",
"path": "rest_framework/relations.py"
}
] | [
{
"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom collections import OrderedDict\n\nfrom django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist\nfrom django.db.models import Manager\nfrom django.db.models.query import QuerySet\nfrom django.urls import NoReverseMatch, Resolver404, get_script_prefix, resolve\nfrom django.utils import six\nfrom django.utils.encoding import (\n python_2_unicode_compatible, smart_text, uri_to_iri\n)\nfrom django.utils.six.moves.urllib import parse as urlparse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework.fields import (\n Field, empty, get_attribute, is_simple_callable, iter_options\n)\nfrom rest_framework.reverse import reverse\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import html\n\n\ndef method_overridden(method_name, klass, instance):\n \"\"\"\n Determine if a method has been overridden.\n \"\"\"\n method = getattr(klass, method_name)\n default_method = getattr(method, '__func__', method) # Python 3 compat\n return default_method is not getattr(instance, method_name).__func__\n\n\nclass Hyperlink(six.text_type):\n \"\"\"\n A string like object that additionally has an associated name.\n We use this for hyperlinked URLs that may render as a named link\n in some contexts, or render as a plain URL in others.\n \"\"\"\n def __new__(self, url, obj):\n ret = six.text_type.__new__(self, url)\n ret.obj = obj\n return ret\n\n def __getnewargs__(self):\n return(str(self), self.name,)\n\n @property\n def name(self):\n # This ensures that we only called `__str__` lazily,\n # as in some cases calling __str__ on a model instances *might*\n # involve a database lookup.\n return six.text_type(self.obj)\n\n is_hyperlink = True\n\n\n@python_2_unicode_compatible\nclass PKOnlyObject(object):\n \"\"\"\n This is a mock object, used for when we only need the pk of the object\n instance, but still want to return an object with a .pk attribute,\n in order to keep the same interface as a regular model instance.\n \"\"\"\n def __init__(self, pk):\n self.pk = pk\n\n def __str__(self):\n return \"%s\" % self.pk\n\n\n# We assume that 'validators' are intended for the child serializer,\n# rather than the parent serializer.\nMANY_RELATION_KWARGS = (\n 'read_only', 'write_only', 'required', 'default', 'initial', 'source',\n 'label', 'help_text', 'style', 'error_messages', 'allow_empty',\n 'html_cutoff', 'html_cutoff_text'\n)\n\n\nclass RelatedField(Field):\n queryset = None\n html_cutoff = None\n html_cutoff_text = None\n\n def __init__(self, **kwargs):\n self.queryset = kwargs.pop('queryset', self.queryset)\n\n cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF\n if cutoff_from_settings is not None:\n cutoff_from_settings = int(cutoff_from_settings)\n self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)\n\n self.html_cutoff_text = kwargs.pop(\n 'html_cutoff_text',\n self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)\n )\n if not method_overridden('get_queryset', RelatedField, self):\n assert self.queryset is not None or kwargs.get('read_only', None), (\n 'Relational field must provide a `queryset` argument, '\n 'override `get_queryset`, or set read_only=`True`.'\n )\n assert not (self.queryset is not None and kwargs.get('read_only', None)), (\n 'Relational fields should not provide a `queryset` argument, '\n 'when setting read_only=`True`.'\n )\n kwargs.pop('many', None)\n kwargs.pop('allow_empty', None)\n super(RelatedField, 
self).__init__(**kwargs)\n\n def __new__(cls, *args, **kwargs):\n # We override this method in order to automagically create\n # `ManyRelatedField` classes instead when `many=True` is set.\n if kwargs.pop('many', False):\n return cls.many_init(*args, **kwargs)\n return super(RelatedField, cls).__new__(cls, *args, **kwargs)\n\n @classmethod\n def many_init(cls, *args, **kwargs):\n \"\"\"\n This method handles creating a parent `ManyRelatedField` instance\n when the `many=True` keyword argument is passed.\n\n Typically you won't need to override this method.\n\n Note that we're over-cautious in passing most arguments to both parent\n and child classes in order to try to cover the general case. If you're\n overriding this method you'll probably want something much simpler, eg:\n\n @classmethod\n def many_init(cls, *args, **kwargs):\n kwargs['child'] = cls()\n return CustomManyRelatedField(*args, **kwargs)\n \"\"\"\n list_kwargs = {'child_relation': cls(*args, **kwargs)}\n for key in kwargs:\n if key in MANY_RELATION_KWARGS:\n list_kwargs[key] = kwargs[key]\n return ManyRelatedField(**list_kwargs)\n\n def run_validation(self, data=empty):\n # We force empty strings to None values for relational fields.\n if data == '':\n data = None\n return super(RelatedField, self).run_validation(data)\n\n def get_queryset(self):\n queryset = self.queryset\n if isinstance(queryset, (QuerySet, Manager)):\n # Ensure queryset is re-evaluated whenever used.\n # Note that actually a `Manager` class may also be used as the\n # queryset argument. This occurs on ModelSerializer fields,\n # as it allows us to generate a more expressive 'repr' output\n # for the field.\n # Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'\n queryset = queryset.all()\n return queryset\n\n def use_pk_only_optimization(self):\n return False\n\n def get_attribute(self, instance):\n if self.use_pk_only_optimization() and self.source_attrs:\n # Optimized case, return a mock object only containing the pk attribute.\n try:\n instance = get_attribute(instance, self.source_attrs[:-1])\n value = instance.serializable_value(self.source_attrs[-1])\n if is_simple_callable(value):\n # Handle edge case where the relationship `source` argument\n # points to a `get_relationship()` method on the model\n value = value().pk\n return PKOnlyObject(pk=value)\n except AttributeError:\n pass\n\n # Standard case, return the object instance.\n return super(RelatedField, self).get_attribute(instance)\n\n def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict([\n (\n self.to_representation(item),\n self.display_value(item)\n )\n for item in queryset\n ])\n\n @property\n def choices(self):\n return self.get_choices()\n\n @property\n def grouped_choices(self):\n return self.choices\n\n def iter_options(self):\n return iter_options(\n self.get_choices(cutoff=self.html_cutoff),\n cutoff=self.html_cutoff,\n cutoff_text=self.html_cutoff_text\n )\n\n def display_value(self, instance):\n return six.text_type(instance)\n\n\nclass StringRelatedField(RelatedField):\n \"\"\"\n A read only field that represents its targets using their\n plain string representation.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['read_only'] = True\n super(StringRelatedField, self).__init__(**kwargs)\n\n def to_representation(self, value):\n return 
six.text_type(value)\n\n\nclass PrimaryKeyRelatedField(RelatedField):\n default_error_messages = {\n 'required': _('This field is required.'),\n 'does_not_exist': _('Invalid pk \"{pk_value}\" - object does not exist.'),\n 'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),\n }\n\n def __init__(self, **kwargs):\n self.pk_field = kwargs.pop('pk_field', None)\n super(PrimaryKeyRelatedField, self).__init__(**kwargs)\n\n def use_pk_only_optimization(self):\n return True\n\n def to_internal_value(self, data):\n if self.pk_field is not None:\n data = self.pk_field.to_internal_value(data)\n try:\n return self.get_queryset().get(pk=data)\n except ObjectDoesNotExist:\n self.fail('does_not_exist', pk_value=data)\n except (TypeError, ValueError):\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n def to_representation(self, value):\n if self.pk_field is not None:\n return self.pk_field.to_representation(value.pk)\n return value.pk\n\n\nclass HyperlinkedRelatedField(RelatedField):\n lookup_field = 'pk'\n view_name = None\n\n default_error_messages = {\n 'required': _('This field is required.'),\n 'no_match': _('Invalid hyperlink - No URL match.'),\n 'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),\n 'does_not_exist': _('Invalid hyperlink - Object does not exist.'),\n 'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),\n }\n\n def __init__(self, view_name=None, **kwargs):\n if view_name is not None:\n self.view_name = view_name\n assert self.view_name is not None, 'The `view_name` argument is required.'\n self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)\n self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)\n self.format = kwargs.pop('format', None)\n\n # We include this simply for dependency injection in tests.\n # We can't add it as a class attributes or it would expect an\n # implicit `self` argument to be passed.\n self.reverse = reverse\n\n super(HyperlinkedRelatedField, self).__init__(**kwargs)\n\n def use_pk_only_optimization(self):\n return self.lookup_field == 'pk'\n\n def get_object(self, view_name, view_args, view_kwargs):\n \"\"\"\n Return the object corresponding to a matched URL.\n\n Takes the matched URL conf arguments, and should return an\n object instance, or raise an `ObjectDoesNotExist` exception.\n \"\"\"\n lookup_value = view_kwargs[self.lookup_url_kwarg]\n lookup_kwargs = {self.lookup_field: lookup_value}\n return self.get_queryset().get(**lookup_kwargs)\n\n def get_url(self, obj, view_name, request, format):\n \"\"\"\n Given an object, return the URL that hyperlinks to the object.\n\n May raise a `NoReverseMatch` if the `view_name` and `lookup_field`\n attributes are not configured to correctly match the URL conf.\n \"\"\"\n # Unsaved objects will not yet have a valid URL.\n if hasattr(obj, 'pk') and obj.pk in (None, ''):\n return None\n\n lookup_value = getattr(obj, self.lookup_field)\n kwargs = {self.lookup_url_kwarg: lookup_value}\n return self.reverse(view_name, kwargs=kwargs, request=request, format=format)\n\n def to_internal_value(self, data):\n request = self.context.get('request', None)\n try:\n http_prefix = data.startswith(('http:', 'https:'))\n except AttributeError:\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n if http_prefix:\n # If needed convert absolute URLs to relative path\n data = urlparse.urlparse(data).path\n prefix = get_script_prefix()\n if data.startswith(prefix):\n data = '/' + data[len(prefix):]\n\n data = 
uri_to_iri(data)\n\n try:\n match = resolve(data)\n except Resolver404:\n self.fail('no_match')\n\n try:\n expected_viewname = request.versioning_scheme.get_versioned_viewname(\n self.view_name, request\n )\n except AttributeError:\n expected_viewname = self.view_name\n\n if match.view_name != expected_viewname:\n self.fail('incorrect_match')\n\n try:\n return self.get_object(match.view_name, match.args, match.kwargs)\n except (ObjectDoesNotExist, TypeError, ValueError):\n self.fail('does_not_exist')\n\n def to_representation(self, value):\n assert 'request' in self.context, (\n \"`%s` requires the request in the serializer\"\n \" context. Add `context={'request': request}` when instantiating \"\n \"the serializer.\" % self.__class__.__name__\n )\n\n request = self.context['request']\n format = self.context.get('format', None)\n\n # By default use whatever format is given for the current context\n # unless the target is a different type to the source.\n #\n # Eg. Consider a HyperlinkedIdentityField pointing from a json\n # representation to an html property of that representation...\n #\n # '/snippets/1/' should link to '/snippets/1/highlight/'\n # ...but...\n # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'\n if format and self.format and self.format != format:\n format = self.format\n\n # Return the hyperlink, or error if incorrectly configured.\n try:\n url = self.get_url(value, self.view_name, request, format)\n except NoReverseMatch:\n msg = (\n 'Could not resolve URL for hyperlinked relationship using '\n 'view name \"%s\". You may have failed to include the related '\n 'model in your API, or incorrectly configured the '\n '`lookup_field` attribute on this field.'\n )\n if value in ('', None):\n value_string = {'': 'the empty string', None: 'None'}[value]\n msg += (\n \" WARNING: The value of the field on the model instance \"\n \"was %s, which may be why it didn't match any \"\n \"entries in your URL conf.\" % value_string\n )\n raise ImproperlyConfigured(msg % self.view_name)\n\n if url is None:\n return None\n\n return Hyperlink(url, value)\n\n\nclass HyperlinkedIdentityField(HyperlinkedRelatedField):\n \"\"\"\n A read-only field that represents the identity URL for an object, itself.\n\n This is in contrast to `HyperlinkedRelatedField` which represents the\n URL of relationships to other objects.\n \"\"\"\n\n def __init__(self, view_name=None, **kwargs):\n assert view_name is not None, 'The `view_name` argument is required.'\n kwargs['read_only'] = True\n kwargs['source'] = '*'\n super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)\n\n def use_pk_only_optimization(self):\n # We have the complete object instance already. 
We don't need\n # to run the 'only get the pk for this relationship' code.\n return False\n\n\nclass SlugRelatedField(RelatedField):\n \"\"\"\n A read-write field that represents the target of the relationship\n by a unique 'slug' attribute.\n \"\"\"\n default_error_messages = {\n 'does_not_exist': _('Object with {slug_name}={value} does not exist.'),\n 'invalid': _('Invalid value.'),\n }\n\n def __init__(self, slug_field=None, **kwargs):\n assert slug_field is not None, 'The `slug_field` argument is required.'\n self.slug_field = slug_field\n super(SlugRelatedField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n try:\n return self.get_queryset().get(**{self.slug_field: data})\n except ObjectDoesNotExist:\n self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))\n except (TypeError, ValueError):\n self.fail('invalid')\n\n def to_representation(self, obj):\n return getattr(obj, self.slug_field)\n\n\nclass ManyRelatedField(Field):\n \"\"\"\n Relationships with `many=True` transparently get coerced into instead being\n a ManyRelatedField with a child relationship.\n\n The `ManyRelatedField` class is responsible for handling iterating through\n the values and passing each one to the child relationship.\n\n This class is treated as private API.\n You shouldn't generally need to be using this class directly yourself,\n and should instead simply set 'many=True' on the relationship.\n \"\"\"\n initial = []\n default_empty_html = []\n default_error_messages = {\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".'),\n 'empty': _('This list may not be empty.')\n }\n html_cutoff = None\n html_cutoff_text = None\n\n def __init__(self, child_relation=None, *args, **kwargs):\n self.child_relation = child_relation\n self.allow_empty = kwargs.pop('allow_empty', True)\n\n cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF\n if cutoff_from_settings is not None:\n cutoff_from_settings = int(cutoff_from_settings)\n self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)\n\n self.html_cutoff_text = kwargs.pop(\n 'html_cutoff_text',\n self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)\n )\n assert child_relation is not None, '`child_relation` is a required argument.'\n super(ManyRelatedField, self).__init__(*args, **kwargs)\n self.child_relation.bind(field_name='', parent=self)\n\n def get_value(self, dictionary):\n # We override the default field access in order to support\n # lists in HTML forms.\n if html.is_html_input(dictionary):\n # Don't return [] if the update is partial\n if self.field_name not in dictionary:\n if getattr(self.root, 'partial', False):\n return empty\n return dictionary.getlist(self.field_name)\n\n return dictionary.get(self.field_name, empty)\n\n def to_internal_value(self, data):\n if isinstance(data, type('')) or not hasattr(data, '__iter__'):\n self.fail('not_a_list', input_type=type(data).__name__)\n if not self.allow_empty and len(data) == 0:\n self.fail('empty')\n\n return [\n self.child_relation.to_internal_value(item)\n for item in data\n ]\n\n def get_attribute(self, instance):\n # Can't have any relationships if not created\n if hasattr(instance, 'pk') and instance.pk is None:\n return []\n\n relationship = get_attribute(instance, self.source_attrs)\n return relationship.all() if hasattr(relationship, 'all') else relationship\n\n def to_representation(self, iterable):\n return [\n self.child_relation.to_representation(value)\n for value in iterable\n ]\n\n def get_choices(self, 
cutoff=None):\n return self.child_relation.get_choices(cutoff)\n\n @property\n def choices(self):\n return self.get_choices()\n\n @property\n def grouped_choices(self):\n return self.choices\n\n def iter_options(self):\n return iter_options(\n self.get_choices(cutoff=self.html_cutoff),\n cutoff=self.html_cutoff,\n cutoff_text=self.html_cutoff_text\n )\n",
"path": "rest_framework/relations.py"
}
] | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
index c87b9299ab..c4e364cf25 100644
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -174,7 +174,7 @@ def get_attribute(self, instance):
             pass
 
         # Standard case, return the object instance.
-        return get_attribute(instance, self.source_attrs)
+        return super(RelatedField, self).get_attribute(instance)
 
     def get_choices(self, cutoff=None):
         queryset = self.get_queryset()
diff --git a/tests/test_model_serializer.py b/tests/test_model_serializer.py
index e55afe03e1..e4fc8b37f6 100644
--- a/tests/test_model_serializer.py
+++ b/tests/test_model_serializer.py
@@ -23,6 +23,8 @@
from rest_framework import serializers
from rest_framework.compat import postgres_fields, unicode_repr
+from .models import NestedForeignKeySource
+
def dedent(blocktext):
return '\n'.join([line[12:] for line in blocktext.splitlines()[1:-1]])
@@ -1164,6 +1166,25 @@ class Meta:
class TestFieldSource(TestCase):
+ def test_traverse_nullable_fk(self):
+ """
+ A dotted source with nullable elements uses default when any item in the chain is None. #5849.
+
+ Similar to model example from test_serializer.py `test_default_for_multiple_dotted_source` method,
+ but using RelatedField, rather than CharField.
+ """
+ class TestSerializer(serializers.ModelSerializer):
+ target = serializers.PrimaryKeyRelatedField(
+ source='target.target', read_only=True, allow_null=True, default=None
+ )
+
+ class Meta:
+ model = NestedForeignKeySource
+ fields = ('target', )
+
+ model = NestedForeignKeySource.objects.create()
+ assert TestSerializer(model).data['target'] is None
+
def test_named_field_source(self):
class TestSerializer(serializers.ModelSerializer):
|
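To make the relations.py change in the record above easier to follow, here is a hedged, self-contained sketch of the behaviour it restores. Everything in it is a simplification written for illustration: `NestedSource`, `resolve`, `Field`, `OldRelatedField` and `NewRelatedField` are stand-ins, not DRF's real classes or helper signatures. The point it demonstrates is the one stated in the new test's docstring: resolving a dotted, nullable `source` directly blows up as soon as an intermediate attribute is `None`, whereas delegating to the parent `get_attribute` lets the field fall back to its declared default.

```py
# Stand-alone sketch (plain Python, no Django or DRF required) of the behaviour
# the relations.py fix restores. All names below are simplified stand-ins.

class NestedSource(object):
    target = None  # the intermediate FK is NULL, as in the regression test


def resolve(instance, attrs):
    # Walk a dotted source; getattr(None, 'target') raises AttributeError.
    for attr in attrs:
        instance = getattr(instance, attr)
    return instance


class Field(object):
    def __init__(self, source, default=None):
        self.source_attrs = source.split('.')
        self.default = default

    def get_attribute(self, instance):
        # Parent behaviour: fall back to the field default when the chain breaks.
        try:
            return resolve(instance, self.source_attrs)
        except AttributeError:
            return self.default


class OldRelatedField(Field):
    def get_attribute(self, instance):
        # Pre-fix: resolve the dotted source directly, bypassing the fallback.
        return resolve(instance, self.source_attrs)


class NewRelatedField(Field):
    def get_attribute(self, instance):
        # Post-fix: defer to the parent, which applies the default.
        return super(NewRelatedField, self).get_attribute(instance)


obj = NestedSource()
print(NewRelatedField('target.target').get_attribute(obj))  # -> None (the default)
try:
    OldRelatedField('target.target').get_attribute(obj)
except AttributeError as exc:
    print('old behaviour raised: %s' % exc)
```

Run as-is, the sketch prints `None` for the fixed path and an `AttributeError` for the old one, which mirrors what `test_traverse_nullable_fk` asserts against the real `PrimaryKeyRelatedField`.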
zestedesavoir__zds-site-672 | Have a reminder of Markdown basics next to the writing areas
The idea is to have a reminder of the basics of Markdown next to the writing areas (the main points + a link to the tutorial).
I know we have the buttons, but it is still useful for people who prefer not to reach for the mouse, and I think it will improve how Markdown is learned.
The most important thing to put in it is probably how line breaks / paragraphs are handled :)
| [
{
"content": "# coding: utf-8\n\nimport locale\nimport os\nimport platform\n\n\n# Python is platform-independent...or is it?\nif platform.system() == \"Windows\":\n locale.setlocale(locale.LC_TIME, 'fra')\nelse:\n locale.setlocale(locale.LC_TIME, 'fr_FR.UTF-8')\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\n\nADMINS = (\n ('user', 'mail'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'base.db',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nSITE_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))\n\nSITE_URL = 'http://127.0.0.1:8000'\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(SITE_ROOT, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(SITE_ROOT, 'assets'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\n\nFIXTURE_DIRS = (os.path.join(SITE_ROOT, 'fixtures'))\n# You will need yuglify to be installed\nPIPELINE_JS = {\n 'modernizr' : {\n 'source_filenames': (\n 'js/vendor/custom.modernizr.js',\n ),\n 'output_filename': 'js/modernizr.js'\n }, \n 'css3-mediaqueries' : {\n 'source_filenames': (\n 'js/vendor/css3-mediaqueries.js',\n ),\n 'output_filename': 'js/css3-mediaqueries.js'\n },\n 'main-js': {\n 'source_filenames': (\n 'js/vendor/jquery.js',\n 'js/vendor/jquery.tabbable.js',\n\n 'js/custom/editor.js',\n\n 'js/custom/mobile-menu.js',\n 
'js/custom/accessibility-links.js',\n 'js/custom/dropdown-menu.js',\n 'js/custom/data-click.js',\n 'js/custom/accordeon.js',\n 'js/custom/modal.js',\n 'js/custom/close-alert-box.js',\n 'js/custom/keyboard-navigation.js',\n 'js/custom/message-hidden.js',\n 'js/custom/spoiler.js',\n ),\n 'output_filename': 'js/main.js'\n }\n}\n\nPIPELINE_CSS = {\n 'main-css': {\n 'source_filenames': (\n 'css/main.css',\n ),\n 'output_filename': 'css/design.css'\n }\n}\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&y49uc#t=*wvi(9hnyii0z'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(SITE_ROOT, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages'\n)\n\nCRISPY_TEMPLATE_PACK='bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'south',\n 'crispy_forms',\n 'email_obfuscator',\n 'pipeline',\n 'haystack',\n 'munin',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.newsletter',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nAUTH_PROFILE_MODULE = 'member.Profile'\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\n# Max size image upload (in bytes)\nIMAGE_MAX_SIZE = 1024 * 1024 * 2\n\n# git directory\nREPO_PATH = os.path.join(SITE_ROOT, 'tutoriels-private')\nREPO_PATH_PROD = os.path.join(SITE_ROOT, 'tutoriels-public')\nREPO_ARTICLE_PATH = os.path.join(SITE_ROOT, 'articles-data')\n\n# Constants for pagination\nPOSTS_PER_PAGE = 21\nTOPICS_PER_PAGE = 21\nMEMBERS_PER_PAGE = 36\n\n# Constants to avoid spam\nSPAM_LIMIT_SECONDS = 60 * 15\nSPAM_LIMIT_PARTICIPANT = 2\nFOLLOWED_TOPICS_PER_PAGE = 21\n\nBOT_ACCOUNT = 'admin'\n\nPANDOC_LOC = ''\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(SITE_ROOT, 'geodata')\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\n\nMAX_POST_LENGTH = 1000000\nSDZ_TUTO_DIR = ''\n\nMAIL_CA_ASSO = '[email protected]'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n",
"path": "zds/settings.py"
}
] | [
{
"content": "# coding: utf-8\n\nimport locale\nimport os\nimport platform\n\n\n# Python is platform-independent...or is it?\nif platform.system() == \"Windows\":\n locale.setlocale(locale.LC_TIME, 'fra')\nelse:\n locale.setlocale(locale.LC_TIME, 'fr_FR.UTF-8')\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\n\nADMINS = (\n ('user', 'mail'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'base.db',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nSITE_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))\n\nSITE_URL = 'http://127.0.0.1:8000'\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(SITE_ROOT, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(SITE_ROOT, 'assets'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\n\nFIXTURE_DIRS = (os.path.join(SITE_ROOT, 'fixtures'))\n# You will need yuglify to be installed\nPIPELINE_JS = {\n 'modernizr' : {\n 'source_filenames': (\n 'js/vendor/custom.modernizr.js',\n ),\n 'output_filename': 'js/modernizr.js'\n }, \n 'css3-mediaqueries' : {\n 'source_filenames': (\n 'js/vendor/css3-mediaqueries.js',\n ),\n 'output_filename': 'js/css3-mediaqueries.js'\n },\n 'main-js': {\n 'source_filenames': (\n 'js/vendor/jquery.js',\n 'js/vendor/jquery.tabbable.js',\n\n 'js/custom/editor.js',\n\n 'js/custom/mobile-menu.js',\n 
'js/custom/accessibility-links.js',\n 'js/custom/dropdown-menu.js',\n 'js/custom/data-click.js',\n 'js/custom/accordeon.js',\n 'js/custom/modal.js',\n 'js/custom/close-alert-box.js',\n 'js/custom/keyboard-navigation.js',\n 'js/custom/message-hidden.js',\n 'js/custom/spoiler.js',\n 'js/custom/karma-ajax.js',\n 'js/custom/markdown-help.js',\n ),\n 'output_filename': 'js/main.js'\n }\n}\n\nPIPELINE_CSS = {\n 'main-css': {\n 'source_filenames': (\n 'css/main.css',\n ),\n 'output_filename': 'css/design.css'\n }\n}\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&y49uc#t=*wvi(9hnyii0z'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(SITE_ROOT, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages'\n)\n\nCRISPY_TEMPLATE_PACK='bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'south',\n 'crispy_forms',\n 'email_obfuscator',\n 'pipeline',\n 'haystack',\n 'munin',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.newsletter',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nAUTH_PROFILE_MODULE = 'member.Profile'\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\n# Max size image upload (in bytes)\nIMAGE_MAX_SIZE = 1024 * 1024 * 2\n\n# git directory\nREPO_PATH = os.path.join(SITE_ROOT, 'tutoriels-private')\nREPO_PATH_PROD = os.path.join(SITE_ROOT, 'tutoriels-public')\nREPO_ARTICLE_PATH = os.path.join(SITE_ROOT, 'articles-data')\n\n# Constants for pagination\nPOSTS_PER_PAGE = 21\nTOPICS_PER_PAGE = 21\nMEMBERS_PER_PAGE = 36\n\n# Constants to avoid spam\nSPAM_LIMIT_SECONDS = 60 * 15\nSPAM_LIMIT_PARTICIPANT = 2\nFOLLOWED_TOPICS_PER_PAGE = 21\n\nBOT_ACCOUNT = 'admin'\n\nPANDOC_LOC = ''\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(SITE_ROOT, 'geodata')\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\n\nMAX_POST_LENGTH = 1000000\nSDZ_TUTO_DIR = ''\n\nMAIL_CA_ASSO = '[email protected]'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n",
"path": "zds/settings.py"
}
] | diff --git a/assets/css/main.css b/assets/css/main.css
index f55d6ebd09..05d1e05c26 100644
--- a/assets/css/main.css
+++ b/assets/css/main.css
@@ -1 +1 @@
-/*! normalize.css v1.1.2 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-size:100%;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}html,button,input,select,textarea{font-family:sans-serif}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{font-size:2em;margin:.67em 0}h2{font-size:1.5em;margin:.83em 0}h3{font-size:1.17em;margin:1em 0}h4{font-size:1em;margin:1.33em 0}h5{font-size:.83em;margin:1.67em 0}h6{font-size:.67em;margin:2.33em 0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:1em 40px}dfn{font-style:italic}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}mark{background:#ff0;color:#000}p,pre{margin:1em 0}code,kbd,pre,samp{font-family:monospace,serif;_font-family:'courier new',monospace;font-size:1em}pre{white-space:pre;white-space:pre-wrap;word-wrap:break-word}q{quotes:none}q:before,q:after{content:'';content:none}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}dl,menu,ol,ul{margin:1em 0}dd{margin:0 0 0 40px}menu,ol,ul{padding:0 0 0 40px}nav ul,nav ol{list-style:none;list-style-image:none}img{border:0;-ms-interpolation-mode:bicubic}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0;white-space:normal;*margin-left:-7px}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;*overflow:visible}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*height:13px;*width:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}html,body,button,input,select,textarea{font-family:"Segoe UI","Trebuchet MS",Helvetica,"Helvetica Neue",Arial,sans-serif;color:#222}.wf-active html,.no-js html,.wf-active body,.no-js body,.wf-active button,.no-js button,.wf-active input,.no-js input,.wf-active select,.no-js select,.wf-active textarea,.no-js textarea{font-family:"Source Sans Pro","Segoe UI","Trebuchet MS",Helvetica,"Helvetica Neue",Arial,sans-serif}html{height:100%;width:100%;font-size:62.5%;overflow-x:hidden}body{background:#f7f7f7;font-size:14px;font-size:1.4rem;line-height:1.7em;min-height:100%;width:100%}.page-container,.main-container{min-height:100%;background:#f7f7f7}.content-container{margin-bottom:50px}hr{display:block;height:1px;border:0;border-top:1px solid #ccc;margin:1em 0;padding:0}img{vertical-align:middle}fieldset{border:0;margin:0;padding:0}textarea{resize:vertical}a{color:#1088bf;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 
Z4wmXK8LcfpzhQfA5fAWtZZZVlU8zbKEliGUJ4enHTsbBykX+fJFIRdl/cbnmAhbcKogxU+F5h72Y/wI3za8wpxzy8AhWut3Jmlw8wc6wLQTwVCtN3e8tmqmBkqsDLhTaYu6Ltf4lcQWKswMkfTT6xvTbhh7gqoEglyiBhU7jNipHu0ZbmiQem7139uTdX8exNUqtqywAAAABJRU5ErkJggg==")}.zform-button-title4{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAQAAAC1+jfqAAAAt0lEQVQoz73QsQ2DMBAFUEsUNAiJiipXRNfRUSE3rty4oYhkBmGCmyAbZILbgAnYgAluDXIBJ6SiRNdY+k/fPpvVnI+5BESKrDOsph8Ce3b0CZob0q8hSuTdayxbXOIE/AceCTjuNoAvmOsDPKSfw+hHN3ZzqwCfYGuuDtBLSA0t3wUtLBovxZJTAkF8Ao0CKGtb2WLKp6xJwItLABlkP+Wcfa/wpE/jVtfEAVjLt/UyMnTdV5/PG1Cu8REDzPeUAAAAAElFTkSuQmCC")}.zform-button-table{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAByElEQVR42q2TzytEURTHv/Pe85sFGVPIRpHflKYRC8rCilCKspKlvZVY8H/IQhQldiyEyUSKUhKxUH7MTH7LNO/+cO6b8d4bWRCn3jvv3nfO53zvufcCfzSPes1tPUxIiVEuRakQAlwATHmuviUYeefh4EzSvNifGa7wGwogpBzr9+cV/qby5MJ5vfIWgGhW8srFLFVmVIXBJG9y0/E09/lvvGUapskzXABpUYeqR35U/S1GUMbhANSiyeZ3wj8CdDcXIO4GsCRA2WBbERaDdxho9dlzS6E79AeccfQ5lqrAJAA1EoZOwbth6LqG5VAYHg3Qkkkre6SOYtIoo6okG3HzyxJUFwzdg16/l4Ij6PEXpShwj8+vn8GYSFUgaWxQubWDCClIeCtAcyAGnRqVVl2cSQXdAKJJJY8Su5q82DiKorPBORbrhxEEKvORl2WF4/TqCTkZhquJIkHTNY+VrOzT0xSdBWD75MEGlnvT7Z1LABhL9IDkdtQVYvM4ivZaR8FyKIK+gNceKwV6cmlOD2gJtWW5uLl/R7kvC5e3r/ZdqClJt5LcJoQUrl2Qwan5s8Y4Fzlqf9XDqS+mdXnYt4fp8SW2iv+wD9RSCSl9jwFVAAAAAElFTkSuQmCC")}.zform-button-math{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAQAAAC1+jfqAAAAs0lEQVQoz2P4z4AfMhCpoNGh8X/d/+oz5UeLz+T/yPqfchTDhLrz+/6XnSnqye3JmJzcEzsfQ0GlQff/Cf9zHCC8sP1Y3FBQP/9/2v0EATyOTDk/+39kAR4FsQkR74Nm4VQQIxB2P/A2nnAIXe9/xrMHwjb5j6EgOMHvvMdpEMsC6Ez992gKggx83ru/cay3qTfvN7qv918L3ZveCa77HfZb7Tfdb7hfd7/mfrV+UuOCAgUAOHoB5MLjQikAAAAASUVORK5CYII=")}.zform-button-footnote{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABlUlEQVR42qWTx05CURCGeQCfwBIX+gY+i23v3qCIEevCaMB7uY0LF9SoxF5iiSshlmCjG2kixRghajQRrjuJ+T3XFRukOMkkM2dyvjP/nIxKVWSL9uWC6j82v7AE+/IqZucXGmoCSLY55PIy1je3YbHOdVUNEMwSvgoFyJ+f2NrZhVmyrVUF4AQzZFnGbShMIDIczmMIoiVTMYDhRby9vePiyg1fIIjnl1dcu71geRNEi7X8XBhOQCabhc8f+PVA8Abph0eEozEFQLqR/p4LzXBIpdMIEQmKjFA4gmgsRs4ecBdPYNG+At5k2S0JoIwcuRDHfSIJt8eDRDIFhhNhoBjQjECkiAoAJQEGmkU4EsPpmQtGRc5T9neQfRqtRMptRV4CQF5ye/2gWeF7QDu04Tq/xBOBUEY2X9EvzNAMTGYr2js6e0jaxJNvzX3kcORwYlpPdZcFGCgWupHxPRLWKXmvut/q8fiQz+UxOaVHJU0o+pqL8npelLB/cAjd6MRJTfuh1gyu6IbHXCRsqXVJG4m3lir+AKcgCFAzJG3uAAAAAElFTkSuQmCC")}div.zform-popup{top:18px;z-index:100;background:transparent;background-color:#fff;background-image:linear-gradient(to center top, #ebebe5 8%,#f9f9f6 75%);border:1px solid #CCCCCC;border-radius:3px;padding:2px}.zform-code-col{display:inline-block;vertical-align:top;margin:2px;min-width:100px}.zform-code-col>span{display:block;color:#2677C9;cursor:pointer}.zform-code-col>span[data-zform-selected='true']{color:blue;font-weight:bold}.zform-code-col>span:hover,.zform-code-col>span:focus{color:#C87B02}#zform-modal-overlay{position:fixed;top:0;left:0;width:100%;height:100%;background:#000;opacity:0.5;filter:alpha(opacity=50);display:none;z-index:99}#zform-modal-wrapper{position:fixed;top:0;left:0;width:100%;height:100%;display:none;margin-top:10%;text-align:center;z-index:100}#zform-modal-wrapper>div{display:inline-block;background:#f4f6f6;border:1px solid #555;border-radius:2px;box-shadow:0 2px 26px rgba(0,0,0,0.3),0 0 0 1px 
rgba(0,0,0,0.1)}#zform-modal-wrapper>div>header{background:#084561;color:#fff;font-weight:bold;line-height:27px;text-align:left;padding-left:6px;padding-right:6px;white-space:nowrap}#zform-modal-wrapper>div>footer{background:#e7ebec;text-align:right;padding-right:6px;line-height:32px;border-top:2px solid #d1d4d5}#zform-modal-wrapper>div>footer>a{cursor:pointer}#zform-modal-wrapper section{display:block;margin:8px;min-width:200px;min-height:50px}.zform-modal label{display:inline-block;width:70px;text-align:left}@media only screen and (max-width: 760px){html.dropdown-active{overflow:hidden}html.dropdown-active .page-container{width:100%}html.dropdown-active .main-container{display:none}.header-menu-dropdown{display:none !important}.dropdown{width:100%;top:180px;bottom:0;border-bottom:none}.dropdown .dropdown-list{overflow:auto;position:absolute;top:36px;bottom:50px}.dropdown .dropdown-link-all{position:absolute;left:0;right:0;bottom:0;height:50px;line-height:50px}form.forum-message .message{padding-top:0 !important}.message-actions a{width:0px}.message-bottom .message-karma a{border-bottom-width:1px !important}.message-submit{display:block !important;width:calc(100% - 16px);margin:0 8px !important}.message-submit button{float:left;display:block;width:49.5%}.message-submit button[type=submit]{float:right}}@media only screen and (max-width: 959px){body{background:#222}body:not(.swipping) .page-container,body:not(.swipping) .mobile-menu{-moz-transition-property:-moz-transform;-o-transition-property:-o-transform;-webkit-transition-property:-webkit-transform;transition-property:transform;-moz-transition-duration:0.3s;-o-transition-duration:0.3s;-webkit-transition-duration:0.3s;transition-duration:0.3s;-moz-transition-timing-function:ease;-o-transition-timing-function:ease;-webkit-transition-timing-function:ease;transition-timing-function:ease}body.swipping *{-moz-user-select:-moz-none;-ms-user-select:none;-webkit-user-select:none;user-select:none;-webkit-pointer-events:none;-moz-pointer-events:none;pointer-events:none}.js .page-container{position:absolute;z-index:10;-moz-transform:translate3d(0, 0, 0);-ms-transform:translate3d(0, 0, 0);-o-transform:translate3d(0, 0, 0);-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}.js .mobile-menu{display:block;position:absolute;position:fixed;overflow-x:hidden;overflow-y:auto;z-index:1;-moz-transform:translate3d(-20%, 0, 0);-ms-transform:translate3d(-20%, 0, 0);-o-transform:translate3d(-20%, 0, 0);-webkit-transform:translate3d(-20%, 0, 0);transform:translate3d(-20%, 0, 0);width:90%;height:100%;padding-bottom:20px;background:#222;-moz-user-select:-moz-none;-ms-user-select:none;-webkit-user-select:none;user-select:none}.js .mobile-menu .search{height:50px;position:relative;top:0;left:0;width:100%}.js .mobile-menu .search input{color:#EEE;background-color:#333;width:76%;height:30px;padding:10px 5%;font-size:16px;font-size:1.6rem}.js .mobile-menu .search input:hover,.js .mobile-menu .search input:focus{padding-bottom:7px;border-bottom:3px solid #084561;background-color:#333}.js .mobile-menu .search button{display:none}.js .mobile-menu .search .search-more{background-color:#3F3F3F;width:14%;height:50px;line-height:50px;color:#CCC}.js .mobile-menu .mobile-menu-bloc,.js .mobile-menu .mobile-menu-link{width:90%;line-height:40px;text-indent:0}.js .mobile-menu .mobile-menu-bloc{margin:0 5% 15px}.js .mobile-menu .mobile-menu-bloc:nth-child(2){margin-top:15px}.js .mobile-menu .mobile-menu-bloc ul,.js .mobile-menu .mobile-menu-bloc 
li{margin:0;padding:0}.js .mobile-menu .mobile-menu-bloc .mobile-menu-link{margin:0;width:100%}.js .mobile-menu .mobile-menu-bloc:not(.mobile-show-ico) .ico-after:after{display:none}.js .mobile-menu .mobile-menu-bloc[data-title]:before{display:block;content:attr(data-title);height:30px;font-size:14px;font-size:1.4rem;text-transform:uppercase;padding-bottom:3px;border-bottom:2px solid #3F3F3F;font-weight:bold;color:#666}.js .mobile-menu .mobile-menu-bloc.mobile-show-ico .ico-after{padding-left:30px;width:calc(100% - 30px)}.js .mobile-menu .mobile-menu-bloc.mobile-show-ico .ico-after:after{top:12px;left:2px}.js .mobile-menu .mobile-menu-link{display:block;height:40px;text-decoration:none;color:#CCC;font-size:16px;font-size:1.6rem;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.js .mobile-menu .mobile-menu-link.mobile-menu-sublink{width:90%;margin:0 0 0 10%}.js .mobile-menu .mobile-menu-link.mobile-menu-bloc[data-title]{height:80px}.js .mobile-menu .mobile-menu-link.mobile-menu-bloc:not([data-title]){margin-bottom:0}.js .mobile-menu .mobile-menu-link:not(:last-child):not(.mobile-menu-bloc){border-bottom:1px solid #2C2C2C}.js .mobile-menu .mobile-menu-link[data-prefix]:before{content:"[" attr(data-prefix) "] "}.js .mobile-menu .mobile-menu-link.unread{font-weight:bold;color:#EEE}.js .mobile-menu .mobile-menu-link img{float:left;margin:5px 5px 5px 0;width:30px;height:30px}.js .mobile-menu .mobile-menu-link .label{padding:0 0 0 50px}.js .mobile-menu .mobile-menu-link img+.label{padding:0 0 0 10px}.js.show-mobile-menu{width:100%}.js.show-mobile-menu body{position:fixed}.js.show-mobile-menu .page-container{height:100%;-moz-transform:translate3d(90%, 0, 0);-ms-transform:translate3d(90%, 0, 0);-o-transform:translate3d(90%, 0, 0);-webkit-transform:translate3d(90%, 0, 0);transform:translate3d(90%, 0, 0);overflow:hidden;-moz-box-shadow:0 0 7px rgba(0,0,0,0.25);-webkit-box-shadow:0 0 7px rgba(0,0,0,0.25);box-shadow:0 0 7px rgba(0,0,0,0.25)}.js.show-mobile-menu .mobile-menu{-moz-transform:translate3d(0, 0, 0);-ms-transform:translate3d(0, 0, 0);-o-transform:translate3d(0, 0, 0);-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}.js.enable-mobile-menu .mobile-menu-hide{display:none}.js.enable-mobile-menu .page-container .mobile-menu-bloc,.js.enable-mobile-menu .page-container .mobile-menu-link,.js.enable-mobile-menu .page-container .search{display:none}.js.enable-mobile-menu .page-container .mobile-menu-btn+.header-logo{margin-left:0}.js.enable-mobile-menu .page-container .mobile-menu-btn{display:block;float:left;height:50px;width:50px}.js.enable-mobile-menu .page-container .mobile-menu-btn:after{display:block;content:" ";position:absolute;top:15px;left:13px;height:22px;width:22px;background-image:url('../images/[email protected]');background-repeat:no-repeat;background-position:0 -3280px}.page-container .header-logo{width:40px;height:50px;margin-left:50px;float:left}.page-container .header-logo-link{background-image:url("../images/[email protected]") !important;background-size:100%;width:100%;height:100%}.page-container .header-logo-link:after{display:block;content:attr(data-title);position:absolute;top:0;left:95px;right:155px;line-height:50px;text-indent:0;text-align:left;font-weight:normal;font-size:17px;font-size:1.7rem;text-overflow:ellipsis;white-space:nowrap;overflow:hidden;max-width:200px}.page-container .header-container .header-menu{height:30px}.page-container .header-container .header-menu .header-menu-list{padding-top:50px}.page-container .header-container 
.header-menu .header-menu-list>li>a{line-height:50px}.page-container .logbox{float:right;width:150px;background:none}.page-container .logbox .notifs-links{width:100%}.page-container .logbox .notifs-links .ico-link{height:50px;width:50px}.page-container .logbox .dropdown{top:50px}.page-container .logbox .dropdown.my-account-dropdown .dropdown-list{bottom:0}.page-container .logbox .dropdown.my-account-dropdown .dropdown-list li{height:45px;line-height:45px}.page-container .logbox.unlogged{font-size:13px;font-size:1.3rem}.page-container .logbox.unlogged a{background-color:rgba(255,255,255,0.1);line-height:30px;height:30px;margin:10px 0;width:74px;margin-right:1px}html:not(.enable-mobile-menu) .header-container{border-bottom:1px solid #CCC}html:not(.enable-mobile-menu) .page-container .header-logo{margin-left:10px}html:not(.enable-mobile-menu) .page-container .header-logo-link:after{left:55px;right:205px}html:not(.enable-mobile-menu) .logbox .notifs-links .ico-link,html:not(.enable-mobile-menu) .logbox .my-account{position:absolute;top:0;right:0;height:50px;width:50px}html:not(.enable-mobile-menu) .logbox .notifs-links .ico-link .avatar,html:not(.enable-mobile-menu) .logbox .my-account .avatar{height:50px;width:50px}html:not(.enable-mobile-menu) .logbox .notifs-links :nth-child(1) .ico-link{right:150px}html:not(.enable-mobile-menu) .logbox .notifs-links :nth-child(2) .ico-link{right:100px}html:not(.enable-mobile-menu) .logbox .notifs-links :nth-child(3) .ico-link,html:not(.enable-mobile-menu) .logbox .notifs-links .ico-link:nth-child(3){right:50px}html:not(.enable-mobile-menu) .logbox.unlogged{position:absolute;top:0;right:0}.main{width:100%}.main .content-container .content-col:not(:first-child),.main .sidebar{margin-top:50px}.home .main .content-container article{padding:20px 4%}.main .sidebar{width:102.5%}.main .sidebar h3,.main .sidebar h4,.main .sidebar ul li{padding-left:5.5%}.main .sidebar h3 a,.main .sidebar h4 a,.main .sidebar ul li a{white-space:normal}.content-col-2:not(:first-child),.content-col-3:not(:first-child){margin-top:50px}.header-menu-dropdown{display:none !important}.topic-list .topic{background:none !important}.main .content-container .topic-message{padding:20px 0}.main .content-container .topic-message .user{position:absolute;top:7px;z-index:10;width:100%}.main .content-container .topic-message .user .avatar-link{float:left;display:none}.main .content-container .topic-message .user .badge{float:left;height:20px;line-height:20px;font-size:12px;width:50px;margin-left:10px}.main .content-container .topic-message .user .user-metadata{float:right;width:140px;margin-right:10px}.main .content-container .topic-message .user .user-metadata a{float:left;height:20px;line-height:20px;border-bottom:none;width:68px}.main .content-container .topic-message .message{border-right:0;border-left:0;padding-top:65px}.main .content-container .topic-message .message .message-metadata{position:absolute;top:0;left:0;right:10px;z-index:15;height:30px;line-height:30px}.main .content-container .topic-message .message .message-metadata .date{float:right}.main .content-container .topic-message .message .message-actions{margin:35px 10px 0 0}.main .content-container .topic-message .message .message-actions a{text-indent:-9999px}.main .content-container .topic-message .message .message-actions a:after{left:12px}.main .content-container .topic-message .message .message-bottom{min-height:0}.main .content-container .topic-message .message .message-bottom .signature{display:none}.main .content-container 
.topic-message .message .message-bottom .message-karma{position:absolute;top:35px;left:10px}.main .content-container .topic-message .message .message-bottom .message-karma a{margin-right:1px;margin-left:0}.main .content-container .topic-message .message .message-bottom .message-karma .tick{text-indent:-9999px;margin-right:1px}.main .content-container .topic-message .message .message-bottom .message-karma .tick:after{left:12px}.main .content-container .topic-message .message .message-bottom .message-karma .upvote,.main .content-container .topic-message .message .message-bottom .message-karma .downvote{padding:0 7px;text-align:center;min-width:30px}.main .content-container .topic-message .message .message-bottom .message-karma .upvote:after,.main .content-container .topic-message .message .message-bottom .message-karma .downvote:after{display:none}.main .content-container .article-content p,.main .content-container .article-content ul:not(.pagination){font-size:15px;font-size:1.5rem;font-size:1.8ex}.main .content-container .content-wrapper h1,.main .content-container .content-wrapper h2,.main .content-container .content-wrapper h3,.main .content-container .content-wrapper h4,.main .content-container .content-wrapper h5,.main .content-container .content-wrapper h6,.main .content-container .content-wrapper .subtitle,.main .content-container .content-wrapper .authors,.main .content-container .content-wrapper p{padding-left:15px;padding-right:15px}.page-footer{text-align:center;height:auto}.page-footer p{border-bottom:1px solid #5b3a03}.page-footer p,.page-footer ul{display:block;float:none}.page-footer ul{line-height:30px}.page-footer ul li{margin:0 5px}}@media only screen and (min-width: 760px){.dropdown{-moz-box-shadow:0 5px 7px rgba(0,0,0,0.3);-webkit-box-shadow:0 5px 7px rgba(0,0,0,0.3);box-shadow:0 5px 7px rgba(0,0,0,0.3)}.header-right .dropdown{width:350px;left:auto;padding:0}.header-right .dropdown .dropdown-list{max-height:270px;overflow-x:hidden;overflow-y:auto}.header-right .dropdown .dropdown-list::-webkit-scrollbar{width:10px;height:10px}.header-right .dropdown .dropdown-list::-webkit-scrollbar-track{background-color:#06354a}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb{background-color:#396a81;border:1px solid #06354a;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 0.15s}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb:hover{background-color:#5196b6}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb:active{background-color:#71b4d3}.header-right .dropdown.my-account-dropdown{width:230px}}@media only screen and (min-width: 960px){html,body,.page-container{height:100%}.main-container{min-height:calc(100% - 146px)}.screen{display:inline}.wrapper{width:95%;margin:0 2.5%}.header-container{z-index:1;position:relative;-moz-box-shadow:0 0 4px rgba(0,0,0,0.3);-webkit-box-shadow:0 0 4px rgba(0,0,0,0.3);box-shadow:0 0 4px rgba(0,0,0,0.3)}.header-container header{background-image:-moz-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:-o-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:-webkit-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:linear-gradient(to right, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 
80%)}.header-logo{float:left;text-align:left;width:240px}.header-container .header-menu{float:left;width:34%;margin-left:.5%}.header-container .header-menu .header-menu-list>li>a{max-width:150px;font-size:1.6rem;font-size:16px}.dropdown{top:60px}.has-dropdown{position:relative;text-indent:-7px}.has-dropdown:after{content:" ";display:block;position:absolute;top:47%;left:83%;height:0;width:0;border:6px solid transparent;border-top:6px solid rgba(255,255,255,0.7)}.has-dropdown:hover:after,.has-dropdown:focus:after,.has-dropdown.active:after{border-top:6px solid #FFF}.logbox .dropdown.my-account-dropdown ul li{height:30px;line-height:30px}.lt-ie9 .dropdown{top:90px}.header-right{float:right;width:230px}.header-right .dropdown{right:2.5%}.breadcrumb{position:relative;display:block;float:left;width:calc(100% - 230px);height:30px}.breadcrumb:after{content:" ";display:block;position:absolute;top:0;right:0;width:50px;height:100%;background-image:-moz-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:-o-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:-webkit-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:linear-gradient(to right, rgba(231,235,236,0),rgba(231,235,236,0.75))}.breadcrumb ul{margin:0;padding:0;list-style:none;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.breadcrumb ul li{position:relative;display:inline-block;padding-right:30px;line-height:30px}.breadcrumb ul li a{text-decoration:none;color:#084561}.breadcrumb ul li a:hover,.breadcrumb ul li a:focus{text-decoration:underline;outline:none}.breadcrumb ul li:not(:last-child):after{display:block;position:absolute;top:0;right:7px;content:" ";height:30px;width:15px;background-image:url('../images/[email protected]');background-repeat:no-repeat;background-position:0 -320px;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}.search:before{content:" ";display:block;position:absolute;left:-20px;height:30px;width:20px;background:-moz-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:-o-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:-webkit-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:linear-gradient(to left, rgba(0,0,0,0.03),rgba(0,0,0,0))}.search form input{padding:8px 10px;height:14px;width:150px}.search form button{height:30px;line-height:30px;width:30px}.search form button:after{top:7px}.search .search-more{width:30px;height:30px;line-height:30px}body.no-sidebar .main .content-container{width:100%}body.no-sidebar .main .sidebar{display:none}.main{display:-webkit-box;display:-ms-flexbox;display:-webkit-flex;display:-moz-box;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:reverse;-moz-box-orient:horizontal;-moz-box-direction:reverse;-ms-flex-direction:row-reverse;-webkit-flex-direction:row-reverse;flex-direction:row-reverse;height:100%;margin-left:0;padding-left:2.5%}.main .content-container{width:80%;margin-right:0}.main .content-container h1,.main .content-container h2{margin-left:1px}.main .content-container .content-col-2{width:49.5%;margin:0 0 0 1%}.main .content-container .content-col-3{width:32%;margin:0 0 0 2%}.main .content-container .content-col-2,.main .content-container .content-col-3{float:left}.main .content-container .content-col-2:first-child,.main .content-container .content-col-3:first-child{margin:0}.main .sidebar{width:22.5%;border-bottom:none}.main .sidebar h3,.main .sidebar h4,.main .sidebar ul 
li{padding-left:11.5%}.main .sidebar h3:first-child{margin-top:31px}.main .sidebar h4[data-num]{padding-left:calc(11% + 25px)}.main .sidebar h4[data-num]:before{left:11%}.main .sidebar.sommaire ul li.current ul{margin-left:calc(-11% - 10px);width:calc(111% + 10px);background:-moz-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:-o-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:-webkit-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:linear-gradient(to bottom, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px)}.main .sidebar.sommaire ul li.current ul a{padding-left:calc(11% + 30px)}.content-cols .main .content-container{width:79%;margin-left:1.5%}.home .main .sidebar{margin-top:30px;border-top:1px solid #FFF}.home .main .sidebar h3:first-child{margin-top:0}.full-content-wrapper .tutorial-list article{width:46%;float:left}.topic-list .topic .topic-description{background-size:0 0}.topic-list .topic .topic-description[style]:before{display:block;position:absolute;content:" ";right:0;background-image:inherit;background-repeat:no-repeat;background-position:top right;background-size:80px 80px;height:100%;width:80px;margin-top:-5px;-webkit-mask-box-image:-webkit-linear-gradient(right, #000, transparent);filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}.topic-message{margin:0 0 25px}.topic-message .user:after,.topic-message .message:after{content:" ";display:block;position:absolute;top:10px;height:0;width:0;border:20px solid transparent;border-left:0}.topic-message .user{position:absolute;padding-top:60px;top:0;left:0}.topic-message .user:after{left:60px;border-right-color:#D2D4D6}.topic-message .message{margin-left:80px}.topic-message .message:after{top:9px;left:-19px;border-right-color:#FDFDFD}.pagination{border:1px solid #d2d5d6}.content-wrapper{margin:0 0 0 1.5%}.full-content-wrapper{margin:0 0 0 2%}.enable-mobile-menu #modals .modal{-moz-box-shadow:0 2px 7px rgba(0,0,0,0.7);-webkit-box-shadow:0 2px 7px rgba(0,0,0,0.7);box-shadow:0 2px 7px rgba(0,0,0,0.7)}.enable-mobile-menu #modals .modal .modal-title{line-height:50px}.enable-mobile-menu #modals .modal [type=submit]:hover,.enable-mobile-menu #modals .modal [type=submit]:focus,.enable-mobile-menu #modals .modal .btn:hover,.enable-mobile-menu #modals .modal .btn:focus{color:#EEE !important;background:#084561 !important}}@media only screen and (min-width: 1140px){.wide{display:inline}table .wide{display:table-cell}.header-container .header-menu{width:40%;margin-left:5%}.full-content-wrapper .tutorial-list article{width:29.3%}}.header-logo-link{background-size:100%;background-image:url("../images/[email protected]")}.ico,.ico-after:after,.breadcrumb ul li:not(:last-child):after{background-size:40px 3000px !important;background-image:url('../images/[email protected]') !important}.js.enable-mobile-menu .page-container .mobile-menu-btn:after{background-position:0 -1640px}.logbox .notifs-links .ico-link .notif-text.ico-messages{background-position:0 -1680px}.logbox .notifs-links .ico-link .notif-text.ico-notifs{background-position:0 -1960px}.logbox .notifs-links .ico-link .notif-text.ico-alerts{background-position:0 -120px}.logbox .notifs-links .ico-link .notif-text.ico-gear{background-position:0 -1200px}.breadcrumb ul li:not(:last-child):after{background-position:0 -160px}.search form button:after{background-position:0 -2320px}.main .content-container h2.ico-articles:after{background-position:0 -440px}.main .content-container h2.ico-tutorials:after{background-position:0 -2800px}.main 
.content-container .article-content .information.ico-after:after,.main .content-container .message-content .information.ico-after:after{background-position:0 -1480px}.main .content-container .article-content .question.ico-after:after,.main .content-container .message-content .question.ico-after:after{background-position:0 -2120px}.main .content-container .article-content .error.ico-after:after,.main .content-container .message-content .error.ico-after:after{background-position:0 -1160px}.main .content-container .article-content .warning.ico-after:after,.main .content-container .message-content .warning.ico-after:after{background-position:0 -2960px}.ico-after.online:after,.ico-after.view:after{background-position:0 -2920px}.ico-after.online.blue:after,.ico-after.view.blue:after{background-position:0 -2840px}.ico-after.online.light:after,.ico-after.view.light:after{background-position:0 -2880px}.ico-after.edit:after{background-position:0 -1120px}.ico-after.edit.blue:after{background-position:0 -1040px}.ico-after.edit.light:after{background-position:0 -1080px}.ico-after.alert:after{background-position:0 -80px}.ico-after.alert.blue:after{background-position:0 0}.ico-after.alert.light:after{background-position:0 -40px}.ico-after.cite:after{background-position:0 -680px}.ico-after.cite.blue:after{background-position:0 -600px}.ico-after.cite.light:after{background-position:0 -640px}.ico-after.tick:after{background-position:0 -2760px}.ico-after.tick.green:after{background-position:0 -2680px}.ico-after.tick.light:after{background-position:0 -2720px}.ico-after.upvote:after{background-position:0 -2640px}.ico-after.upvote.voted:after{background-position:0 -2600px}.ico-after.downvote:after{background-position:0 -2560px}.ico-after.downvote.voted:after{background-position:0 -2520px}.ico-after.lock:after{background-position:0 -1600px}.ico-after.lock.blue:after{background-position:0 -1520px}.ico-after.lock.light:after{background-position:0 -1560px}.ico-after.more:after{background-position:0 -1800px}.ico-after.more.blue:after{background-position:0 -1720px}.ico-after.more.light:after{background-position:0 -1760px}.ico-after.cross:after{background-position:0 -880px}.ico-after.cross.blue:after{background-position:0 -720px}.ico-after.cross.red:after{background-position:0 -800px}.ico-after.cross.light:after{background-position:0 -760px}.ico-after.cross.white:after{background-position:0 -840px}.ico-after.pin:after{background-position:0 -2080px}.ico-after.pin.blue:after{background-position:0 -2000px}.ico-after.pin.light:after{background-position:0 -2040px}.ico-after.beta:after{background-position:0 -560px}.ico-after.beta.blue:after{background-position:0 -480px}.ico-after.beta.light:after{background-position:0 -520px}.ico-after.offline:after,.ico-after.arrow-right:after{background-position:0 -400px}.ico-after.offline.blue:after,.ico-after.arrow-right.blue:after{background-position:0 -320px}.ico-after.offline.light:after,.ico-after.arrow-right.light:after{background-position:0 -360px}.ico-after.arrow-left:after{background-position:0 -280px}.ico-after.arrow-left.blue:after{background-position:0 -200px}.ico-after.arrow-left.light:after{background-position:0 -240px}.ico-after.move:after{background-position:0 -1920px}.ico-after.move.blue:after{background-position:0 -1840px}.ico-after.move.light:after{background-position:0 -1880px}.ico-after.star:after{background-position:0 -2480px}.ico-after.star.yellow:after{background-position:0 -2440px}.ico-after.star.blue:after{background-position:0 
-2360px}.ico-after.star.light:after{background-position:0 -2400px}.ico-after.download:after{background-position:0 -1000px}.ico-after.download.blue:after{background-position:0 -920px}.ico-after.download.light:after{background-position:0 -960px}.ico-after.import:after{background-position:0 -1440px}.ico-after.import.blue:after{background-position:0 -1360px}.ico-after.import.light:after{background-position:0 -1400px}.ico-after.history:after{background-position:0 -1320px}.ico-after.history.blue:after{background-position:0 -1240px}.ico-after.history.light:after{background-position:0 -1280px}.ico-after.rss:after{background-position:0 -2280px}.ico-after.rss.blue:after{background-position:0 -2160px}.ico-after.rss.orange:after{background-position:0 -2240px}.ico-after.rss.light:after{background-position:0 -2200px}.codehilite .hll{background-color:#ffc}.codehilite{background:#f8f8f8}.codehilite .c{color:#408080;font-style:italic}.codehilite .err{border:1px solid red}.codehilite .k{color:#008000;font-weight:bold}.codehilite .o{color:#666}.codehilite .cm{color:#408080;font-style:italic}.codehilite .cp{color:#bc7a00}.codehilite .c1{color:#408080;font-style:italic}.codehilite .cs{color:#408080;font-style:italic}.codehilite .gd{color:#a00000}.codehilite .ge{font-style:italic}.codehilite .gr{color:red}.codehilite .gh{color:#000080;font-weight:bold}.codehilite .gi{color:#00a000}.codehilite .go{color:gray}.codehilite .gp{color:#000080;font-weight:bold}.codehilite .gs{font-weight:bold}.codehilite .gu{color:#800080;font-weight:bold}.codehilite .gt{color:#0040d0}.codehilite .kc{color:#008000;font-weight:bold}.codehilite .kd{color:#008000;font-weight:bold}.codehilite .kn{color:#008000;font-weight:bold}.codehilite .kp{color:green}.codehilite .kr{color:#008000;font-weight:bold}.codehilite .kt{color:#b00040}.codehilite .m{color:#666}.codehilite .s{color:#ba2121}.codehilite .na{color:#7d9029}.codehilite .nb{color:green}.codehilite .nc{color:#0000FF;font-weight:bold}.codehilite .no{color:#800}.codehilite .nd{color:#a2f}.codehilite .ni{color:#999999;font-weight:bold}.codehilite .ne{color:#D2413A;font-weight:bold}.codehilite .nf{color:blue}.codehilite .nl{color:#a0a000}.codehilite .nn{color:#0000FF;font-weight:bold}.codehilite .nt{color:#008000;font-weight:bold}.codehilite .nv{color:#19177c}.codehilite .ow{color:#AA22FF;font-weight:bold}.codehilite .w{color:#bbb}.codehilite .mf{color:#666}.codehilite .mh{color:#666}.codehilite .mi{color:#666}.codehilite .mo{color:#666}.codehilite .sb{color:#ba2121}.codehilite .sc{color:#ba2121}.codehilite .sd{color:#BA2121;font-style:italic}.codehilite .s2{color:#ba2121}.codehilite .se{color:#BB6622;font-weight:bold}.codehilite .sh{color:#ba2121}.codehilite .si{color:#BB6688;font-weight:bold}.codehilite .sx{color:green}.codehilite .sr{color:#b68}.codehilite .s1{color:#ba2121}.codehilite .ss{color:#19177c}.codehilite .bp{color:green}.codehilite .vc{color:#19177c}.codehilite .vg{color:#19177c}.codehilite .vi{color:#19177c}.codehilite .il{color:#666}.codehilitetable{width:100% !important;table-layout:fixed;border-color:rgba(0,0,0,0.15)}.codehilitetable td{padding:0}.codehilitetable .linenos{background-color:#fbfbfc;border-right:1px solid #ececf0;width:46px}.codehilitetable .codehilite,.codehilitetable .linenos{padding-top:15px;padding-bottom:15px}.codehilitetable .linenodiv pre{text-align:right;padding-right:emCalc(6px);color:#bebec5}.codehilitetable .codehilite pre{padding-left:emCalc(6px)}.codehilitetable .codehilite{width:100%;height:auto;overflow:auto}.codehilitetable .codehilite 
pre{white-space:pre;overflow:auto;overflow:auto}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.dropdown{display:none !important}}
+/*! normalize.css v1.1.2 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-size:100%;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}html,button,input,select,textarea{font-family:sans-serif}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{font-size:2em;margin:.67em 0}h2{font-size:1.5em;margin:.83em 0}h3{font-size:1.17em;margin:1em 0}h4{font-size:1em;margin:1.33em 0}h5{font-size:.83em;margin:1.67em 0}h6{font-size:.67em;margin:2.33em 0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:1em 40px}dfn{font-style:italic}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}mark{background:#ff0;color:#000}p,pre{margin:1em 0}code,kbd,pre,samp{font-family:monospace,serif;_font-family:'courier new',monospace;font-size:1em}pre{white-space:pre;white-space:pre-wrap;word-wrap:break-word}q{quotes:none}q:before,q:after{content:'';content:none}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}dl,menu,ol,ul{margin:1em 0}dd{margin:0 0 0 40px}menu,ol,ul{padding:0 0 0 40px}nav ul,nav ol{list-style:none;list-style-image:none}img{border:0;-ms-interpolation-mode:bicubic}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0;white-space:normal;*margin-left:-7px}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;*overflow:visible}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*height:13px;*width:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}html,body,button,input,select,textarea{font-family:"Segoe UI","Trebuchet MS",Helvetica,"Helvetica Neue",Arial,sans-serif;color:#222}.wf-active html,.no-js html,.wf-active body,.no-js body,.wf-active button,.no-js button,.wf-active input,.no-js input,.wf-active select,.no-js select,.wf-active textarea,.no-js textarea{font-family:"Source Sans Pro","Segoe UI","Trebuchet MS",Helvetica,"Helvetica Neue",Arial,sans-serif}html{height:100%;width:100%;font-size:62.5%;overflow-x:hidden}body{background:#f7f7f7;font-size:14px;font-size:1.4rem;line-height:1.7em;min-height:100%;width:100%}.page-container,.main-container{min-height:100%;background:#f7f7f7}.content-container{margin-bottom:50px}hr{display:block;height:1px;border:0;border-top:1px solid #ccc;margin:1em 0;padding:0}img{vertical-align:middle}fieldset{border:0;margin:0;padding:0}textarea{resize:vertical}a{color:#1088bf;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 
0.15s}a:hover{color:#d68807;text-decoration:none}.chromeframe{margin:0;background:#ccc;color:#000;padding:0.2em 0;text-align:center}.mobile-menu,.mobile-menu-btn{display:none}.ico{background-image:url('../images/[email protected]');background-repeat:no-repeat}.ico-after{position:relative}.ico-after:after{content:" ";display:block;position:absolute;top:0;left:0;width:16px;height:16px;background-image:url('../images/[email protected]');background-repeat:no-repeat}.a11y{display:block;width:0;height:0;text-indent:-9999px}.ir{background-color:transparent;border:0;overflow:hidden;*text-indent:-9999px}.ir:before{content:"";display:block;width:0;height:150%}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.clearfix:before,.clearfix:after{content:" ";display:table}.clearfix:after{clear:both}.clearfix{*zoom:1}.header-container header .accessibility{list-style:none;margin:0;padding:0 2.5%;background:rgba(0,0,0,0.2);overflow:hidden;height:0}.header-container header .accessibility.focused{height:auto}.header-container header .accessibility li{display:inline;margin:0;padding:0}.header-container header .accessibility li a{display:inline-block;padding:0 7px}.header-container header .accessibility li a:hover,.header-container header .accessibility li a:focus{color:#084561;background-color:#fff}.header-container header{background:#084561;border-bottom:3px solid #f8ad32}.header-container header a,.header-container header button{text-decoration:none;color:#FFF;-moz-transition-property:background;-o-transition-property:background;-webkit-transition-property:background;transition-property:background;-moz-transition-duration:0.15s;-o-transition-duration:0.15s;-webkit-transition-duration:0.15s;transition-duration:0.15s}.header-container header a:focus,.header-container header button:focus{outline:none}.header-logo{text-align:center;margin:0;padding:0;width:100%}.header-logo-link{display:block;margin:0 auto;text-indent:-9999px;width:100%;max-width:240px;height:60px;background:url("../images/logo.png") no-repeat center center;background-size:100% auto}.header-logo-link.oldie{width:240px}.header-logo-link:hover,.header-logo-link:focus{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=70);opacity:0.7}.dropdown{display:none;position:absolute;text-align:left;top:50px;left:0;right:0;background-color:#396a81;margin:0;padding:10px 2.5%;font-size:14px;font-size:1.4rem;border-bottom:3px solid #f8ad32;z-index:50}.dropdown .dropdown-title{text-transform:uppercase;color:#FFF}.dropdown .dropdown-list{width:100%;padding:0}.dropdown .dropdown-list>li{width:20%;float:left}.dropdown .dropdown-list>li.dropdown-empty-message{color:rgba(255,255,255,0.5);text-align:center;line-height:60px;background:none !important}.dropdown .dropdown-list>li ul{margin:0 0 10px;padding:0}.dropdown .dropdown-list>li ul li{position:relative}.dropdown .dropdown-list>li ul li a{display:block;width:95%;height:25px;line-height:25px;color:#95d7f5;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 0.15s}.dropdown .dropdown-list>li ul li a:hover,.dropdown .dropdown-list>li ul li a:focus{text-indent:3%;background-color:rgba(0,0,0,0.3)}.dropdown 
.dropdown-link-all{display:block;clear:both;text-align:center;height:30px;line-height:30px;border-top:1px solid #274a5a;background-color:#396a81;-moz-transition-property:color,background-color;-o-transition-property:color,background-color;-webkit-transition-property:color,background-color;transition-property:color,background-color}.dropdown .dropdown-link-all:first-child{border-top:0 !important;border-bottom:1px solid #274a5a}.dropdown .dropdown-link-all:hover,.dropdown .dropdown-link-all:focus{color:#95d7f5;background-color:#274a5a;border-top:1px solid #396a81}.active+.dropdown{display:block}.header-container .header-menu{height:60px}.header-container .header-menu .header-menu-list{margin:0;padding:0}.header-container .header-menu .header-menu-list>li{display:block;float:left;width:33.3%}.header-container .header-menu .header-menu-list>li>a{display:block;position:relative;text-align:center;line-height:60px;text-transform:uppercase;font-size:1.5px;font-size:1.5rem;text-shadow:rgba(0,0,0,0.75) 0 0 3px}.header-container .header-menu .header-menu-list>li>a:hover,.header-container .header-menu .header-menu-list>li>a:focus,.header-container .header-menu .header-menu-list>li>a.active{background:#396a81}.header-container .header-menu .header-menu-list>li>a.current:before{content:" ";display:block;position:absolute;bottom:0;left:0;right:0;height:2px;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 0.15s;-moz-border-radius:2px 2px 0 0;-webkit-border-radius:2px;border-radius:2px 2px 0 0;background-color:#f8ad32}.header-container .header-menu .header-menu-list>li>a.current.active:before{height:0}.logbox{background:rgba(255,255,255,0.05)}.logbox .notifs-links{margin-right:60px}.logbox .notifs-links .ico-link{display:block;position:relative;width:33.3%;height:60px;line-height:60px;float:left}.logbox .notifs-links .ico-link .notif-count{display:block;position:absolute;z-index:1;top:50%;right:50%;margin:-20px -22px 0 0;padding:0 5px;height:16px;line-height:14px;background:#c0392b;-moz-border-radius:16px;-webkit-border-radius:16px;border-radius:16px}.logbox .notifs-links .ico-link .notif-text{display:block;position:absolute;text-indent:-9999px;height:22px;width:22px;top:50%;left:50%;margin:-11px 0 0 -11px}.logbox .notifs-links .ico-link .notif-text.ico-messages{background-position:0 -3360px}.logbox .notifs-links .ico-link .notif-text.ico-notifs{background-position:0 -3920px}.logbox .notifs-links .ico-link .notif-text.ico-alerts{background-position:0 -240px}.logbox .notifs-links .ico-link .notif-text.ico-gear{background-position:0 -2400px}.logbox .notifs-links .ico-link:hover,.logbox .notifs-links .ico-link:focus,.logbox .notifs-links .ico-link.active{background:#396a81}.logbox .dropdown{overflow:hidden}.logbox .dropdown .dropdown-title{display:block;width:100%;height:35px;line-height:37px;text-align:center;border-bottom:1px solid #274a5a;background-color:#396a81}.logbox .dropdown,.logbox .dropdown .dropdown-list{margin:0;padding:0;list-style:none;background-color:#19526c}.logbox .dropdown li,.logbox .dropdown .dropdown-list li{display:block;width:100%;height:60px}.logbox .dropdown li a,.logbox .dropdown .dropdown-list li a{display:block;overflow:hidden;position:relative;height:100%;width:100%}.logbox .dropdown li a,.logbox .dropdown li a:hover,.logbox .dropdown li a:focus,.logbox .dropdown li a.read:hover,.logbox .dropdown li a.read:focus,.logbox .dropdown .dropdown-list li a,.logbox .dropdown .dropdown-list li a:hover,.logbox .dropdown .dropdown-list li 
a:focus,.logbox .dropdown .dropdown-list li a.read:hover,.logbox .dropdown .dropdown-list li a.read:focus{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100);opacity:1;-moz-transition-property:opacity,background-color;-o-transition-property:opacity,background-color;-webkit-transition-property:opacity,background-color;transition-property:opacity,background-color}.logbox .dropdown li a:hover,.logbox .dropdown li a:focus,.logbox .dropdown .dropdown-list li a:hover,.logbox .dropdown .dropdown-list li a:focus{background-color:#396a81}.logbox .dropdown li a:hover .username,.logbox .dropdown li a:focus .username,.logbox .dropdown .dropdown-list li a:hover .username,.logbox .dropdown .dropdown-list li a:focus .username{text-shadow:rgba(0,0,0,0.5) 0 0 5px}.logbox .dropdown li a:hover .date,.logbox .dropdown li a:focus .date,.logbox .dropdown .dropdown-list li a:hover .date,.logbox .dropdown .dropdown-list li a:focus .date{color:#95D7F5}.logbox .dropdown li a.read,.logbox .dropdown .dropdown-list li a.read{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=50);opacity:0.5}.logbox .dropdown li .avatar,.logbox .dropdown .dropdown-list li .avatar{float:left;height:30px;width:30px}.logbox .dropdown li .username,.logbox .dropdown .dropdown-list li .username{display:block;float:left;margin:4px 0 0 7px;color:#95D7F5;width:50%;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.logbox .dropdown li .date,.logbox .dropdown .dropdown-list li .date{color:#5196b6;float:right;padding:4px 10px 0 0;-moz-transition-property:color;-o-transition-property:color;-webkit-transition-property:color;transition-property:color}.logbox .dropdown li .topic,.logbox .dropdown .dropdown-list li .topic{display:block;position:absolute;bottom:0;left:0;overflow:hidden;height:25px;padding:4px 7px 2px;text-overflow:ellipsis;white-space:nowrap;width:95%;width:calc(100% - 14px)}.logbox .dropdown li:nth-child(2n+1),.logbox .dropdown li:nth-child(2n+1) form button,.logbox .dropdown .dropdown-list li:nth-child(2n+1),.logbox .dropdown .dropdown-list li:nth-child(2n+1) form button{background-color:#084561}.logbox .my-account{display:block;height:60px;width:60px;float:right}.logbox .my-account .username{display:none}.logbox .my-account .avatar{background:#396a81}.logbox .dropdown.my-account-dropdown a,.logbox .dropdown.my-account-dropdown button{padding-left:10px}.logbox .dropdown.my-account-dropdown button{width:100%;height:30px;line-height:28px;background:transparent;text-align:left;border:0}.logbox .dropdown.my-account-dropdown button:hover,.logbox .dropdown.my-account-dropdown button:focus{background:#396a81}.logbox.unlogged a{display:block;width:50%;text-align:center;float:left;line-height:60px;height:60px}.logbox.unlogged a:hover,.logbox.unlogged a:focus{background-color:#396a81}.avatar{height:60px;width:60px;background-color:#FFF}.sub-header{background:#EEE}.breadcrumb{display:none}.search{display:block;position:relative}.search form input,.search form button{float:left;border:none;background:rgba(255,255,255,0.25);height:40px;-moz-transition-property:background;-o-transition-property:background;-webkit-transition-property:background;transition-property:background;-moz-transition-duration:0.15s;-o-transition-duration:0.15s;-webkit-transition-duration:0.15s;transition-duration:0.15s}.search form input:hover,.search form input:focus,.search form button:hover,.search form button:focus{outline:none;background-color:rgba(255,255,255,0.75)}.search form input{height:30px;padding:5px 3%;width:70%}.search form 
.user-metadata a{float:left;height:20px;line-height:20px;border-bottom:none;width:68px}.main .content-container .topic-message .message{border-right:0;border-left:0;padding-top:65px}.main .content-container .topic-message .message .message-metadata{position:absolute;top:0;left:0;right:10px;z-index:15;height:30px;line-height:30px}.main .content-container .topic-message .message .message-metadata .date{float:right}.main .content-container .topic-message .message .message-actions{margin:35px 10px 0 0}.main .content-container .topic-message .message .message-actions a{text-indent:-9999px}.main .content-container .topic-message .message .message-actions a:after{left:12px}.main .content-container .topic-message .message .message-bottom{min-height:0}.main .content-container .topic-message .message .message-bottom .signature{display:none}.main .content-container .topic-message .message .message-bottom .message-karma{position:absolute;top:35px;left:10px}.main .content-container .topic-message .message .message-bottom .message-karma a{margin-right:1px;margin-left:0}.main .content-container .topic-message .message .message-bottom .message-karma .tick{text-indent:-9999px;margin-right:1px}.main .content-container .topic-message .message .message-bottom .message-karma .tick:after{left:12px}.main .content-container .topic-message .message .message-bottom .message-karma .upvote,.main .content-container .topic-message .message .message-bottom .message-karma .downvote{padding:0 7px;text-align:center;min-width:30px}.main .content-container .topic-message .message .message-bottom .message-karma .upvote:after,.main .content-container .topic-message .message .message-bottom .message-karma .downvote:after{display:none}.main .content-container .article-content p,.main .content-container .article-content ul:not(.pagination){font-size:15px;font-size:1.5rem;font-size:1.8ex}.main .content-container .content-wrapper h1,.main .content-container .content-wrapper h2,.main .content-container .content-wrapper h3,.main .content-container .content-wrapper h4,.main .content-container .content-wrapper h5,.main .content-container .content-wrapper h6,.main .content-container .content-wrapper .subtitle,.main .content-container .content-wrapper .authors,.main .content-container .content-wrapper p{padding-left:15px;padding-right:15px}.page-footer{text-align:center;height:auto}.page-footer p{border-bottom:1px solid #5b3a03}.page-footer p,.page-footer ul{display:block;float:none}.page-footer ul{line-height:30px}.page-footer ul li{margin:0 5px}}@media only screen and (min-width: 760px){.dropdown{-moz-box-shadow:0 5px 7px rgba(0,0,0,0.3);-webkit-box-shadow:0 5px 7px rgba(0,0,0,0.3);box-shadow:0 5px 7px rgba(0,0,0,0.3)}.header-right .dropdown{width:350px;left:auto;padding:0}.header-right .dropdown .dropdown-list{max-height:270px;overflow-x:hidden;overflow-y:auto}.header-right .dropdown .dropdown-list::-webkit-scrollbar{width:10px;height:10px}.header-right .dropdown .dropdown-list::-webkit-scrollbar-track{background-color:#06354a}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb{background-color:#396a81;border:1px solid #06354a;-moz-transition:all 0.15s;-o-transition:all 0.15s;-webkit-transition:all 0.15s;transition:all 0.15s}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb:hover{background-color:#5196b6}.header-right .dropdown .dropdown-list::-webkit-scrollbar-thumb:active{background-color:#71b4d3}.header-right .dropdown.my-account-dropdown{width:230px}}@media only screen and (min-width: 
960px){html,body,.page-container{height:100%}.main-container{min-height:calc(100% - 146px)}.screen{display:inline}.wrapper{width:95%;margin:0 2.5%}.header-container{z-index:1;position:relative;-moz-box-shadow:0 0 4px rgba(0,0,0,0.3);-webkit-box-shadow:0 0 4px rgba(0,0,0,0.3);box-shadow:0 0 4px rgba(0,0,0,0.3)}.header-container header{background-image:-moz-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:-o-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:-webkit-linear-gradient(left, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%);background-image:linear-gradient(to right, rgba(0,0,0,0) 20%,rgba(255,255,255,0.07) 40%,rgba(255,255,255,0.07) 60%,rgba(0,0,0,0) 80%)}.header-logo{float:left;text-align:left;width:240px}.header-container .header-menu{float:left;width:34%;margin-left:.5%}.header-container .header-menu .header-menu-list>li>a{max-width:150px;font-size:1.6rem;font-size:16px}.dropdown{top:60px}.has-dropdown{position:relative;text-indent:-7px}.has-dropdown:after{content:" ";display:block;position:absolute;top:47%;left:83%;height:0;width:0;border:6px solid transparent;border-top:6px solid rgba(255,255,255,0.7)}.has-dropdown:hover:after,.has-dropdown:focus:after,.has-dropdown.active:after{border-top:6px solid #FFF}.logbox .dropdown.my-account-dropdown ul li{height:30px;line-height:30px}.lt-ie9 .dropdown{top:90px}.header-right{float:right;width:230px}.header-right .dropdown{right:2.5%}.breadcrumb{position:relative;display:block;float:left;width:calc(100% - 230px);height:30px}.breadcrumb:after{content:" ";display:block;position:absolute;top:0;right:0;width:50px;height:100%;background-image:-moz-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:-o-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:-webkit-linear-gradient(left, rgba(231,235,236,0),rgba(231,235,236,0.75));background-image:linear-gradient(to right, rgba(231,235,236,0),rgba(231,235,236,0.75))}.breadcrumb ul{margin:0;padding:0;list-style:none;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.breadcrumb ul li{position:relative;display:inline-block;padding-right:30px;line-height:30px}.breadcrumb ul li a{text-decoration:none;color:#084561}.breadcrumb ul li a:hover,.breadcrumb ul li a:focus{text-decoration:underline;outline:none}.breadcrumb ul li:not(:last-child):after{display:block;position:absolute;top:0;right:7px;content:" ";height:30px;width:15px;background-image:url('../images/[email protected]');background-repeat:no-repeat;background-position:0 -320px;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}.search:before{content:" ";display:block;position:absolute;left:-20px;height:30px;width:20px;background:-moz-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:-o-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:-webkit-linear-gradient(right, rgba(0,0,0,0.03),rgba(0,0,0,0));background:linear-gradient(to left, rgba(0,0,0,0.03),rgba(0,0,0,0))}.search form input{padding:8px 10px;height:14px;width:150px}.search form button{height:30px;line-height:30px;width:30px}.search form button:after{top:7px}.search .search-more{width:30px;height:30px;line-height:30px}body.no-sidebar .main .content-container{width:100%}body.no-sidebar .main 
.sidebar{display:none}.main{display:-webkit-box;display:-ms-flexbox;display:-webkit-flex;display:-moz-box;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:reverse;-moz-box-orient:horizontal;-moz-box-direction:reverse;-ms-flex-direction:row-reverse;-webkit-flex-direction:row-reverse;flex-direction:row-reverse;height:100%;margin-left:0;padding-left:2.5%}.main .content-container{width:80%;margin-right:0}.main .content-container h1,.main .content-container h2{margin-left:1px}.main .content-container .content-col-2{width:49.5%;margin:0 0 0 1%}.main .content-container .content-col-3{width:32%;margin:0 0 0 2%}.main .content-container .content-col-2,.main .content-container .content-col-3{float:left}.main .content-container .content-col-2:first-child,.main .content-container .content-col-3:first-child{margin:0}.main .sidebar{width:22.5%;border-bottom:none}.main .sidebar h3,.main .sidebar h4,.main .sidebar ul li{padding-left:11.5%}.main .sidebar h3:first-child{margin-top:31px}.main .sidebar h4[data-num]{padding-left:calc(11% + 25px)}.main .sidebar h4[data-num]:before{left:11%}.main .sidebar.sommaire ul li.current ul{margin-left:calc(-11% - 10px);width:calc(111% + 10px);background:-moz-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:-o-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:-webkit-linear-gradient(top, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px);background:linear-gradient(to bottom, rgba(0,0,0,0.07),rgba(0,0,0,0) 3px)}.main .sidebar.sommaire ul li.current ul a{padding-left:calc(11% + 30px)}.content-cols .main .content-container{width:79%;margin-left:1.5%}.home .main .sidebar{margin-top:30px;border-top:1px solid #FFF}.home .main .sidebar h3:first-child{margin-top:0}.full-content-wrapper .tutorial-list article{width:46%;float:left}.topic-list .topic .topic-description{background-size:0 0}.topic-list .topic .topic-description[style]:before{display:block;position:absolute;content:" ";right:0;background-image:inherit;background-repeat:no-repeat;background-position:top right;background-size:80px 80px;height:100%;width:80px;margin-top:-5px;-webkit-mask-box-image:-webkit-linear-gradient(right, #000, transparent);filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}.topic-message{margin:0 0 25px}.topic-message .user:after,.topic-message .message:after{content:" ";display:block;position:absolute;top:10px;height:0;width:0;border:20px solid transparent;border-left:0}.topic-message .user{position:absolute;padding-top:60px;top:0;left:0}.topic-message .user:after{left:60px;border-right-color:#D2D4D6}.topic-message .message{margin-left:80px}.topic-message .message:after{top:9px;left:-19px;border-right-color:#FDFDFD}.pagination{border:1px solid #d2d5d6}.content-wrapper{margin:0 0 0 1.5%}.full-content-wrapper{margin:0 0 0 2%}.enable-mobile-menu #modals .modal{-moz-box-shadow:0 2px 7px rgba(0,0,0,0.7);-webkit-box-shadow:0 2px 7px rgba(0,0,0,0.7);box-shadow:0 2px 7px rgba(0,0,0,0.7)}.enable-mobile-menu #modals .modal .modal-title{line-height:50px}.enable-mobile-menu #modals .modal [type=submit]:hover,.enable-mobile-menu #modals .modal [type=submit]:focus,.enable-mobile-menu #modals .modal .btn:hover,.enable-mobile-menu #modals .modal .btn:focus{color:#EEE !important;background:#084561 !important}}@media only screen and (min-width: 1140px){.wide{display:inline}table .wide{display:table-cell}.header-container .header-menu{width:40%;margin-left:5%}.full-content-wrapper .tutorial-list 
article{width:29.3%}}.header-logo-link{background-size:100%;background-image:url("../images/[email protected]")}.ico,.ico-after:after,.breadcrumb ul li:not(:last-child):after{background-size:40px 3000px !important;background-image:url('../images/[email protected]') !important}.js.enable-mobile-menu .page-container .mobile-menu-btn:after{background-position:0 -1640px}.logbox .notifs-links .ico-link .notif-text.ico-messages{background-position:0 -1680px}.logbox .notifs-links .ico-link .notif-text.ico-notifs{background-position:0 -1960px}.logbox .notifs-links .ico-link .notif-text.ico-alerts{background-position:0 -120px}.logbox .notifs-links .ico-link .notif-text.ico-gear{background-position:0 -1200px}.breadcrumb ul li:not(:last-child):after{background-position:0 -160px}.search form button:after{background-position:0 -2320px}.main .content-container h2.ico-articles:after{background-position:0 -440px}.main .content-container h2.ico-tutorials:after{background-position:0 -2800px}.main .content-container .article-content .information.ico-after:after,.main .content-container .message-content .information.ico-after:after{background-position:0 -1480px}.main .content-container .article-content .question.ico-after:after,.main .content-container .message-content .question.ico-after:after{background-position:0 -2120px}.main .content-container .article-content .error.ico-after:after,.main .content-container .message-content .error.ico-after:after{background-position:0 -1160px}.main .content-container .article-content .warning.ico-after:after,.main .content-container .message-content .warning.ico-after:after{background-position:0 -2960px}.ico-after.online:after,.ico-after.view:after{background-position:0 -2920px}.ico-after.online.blue:after,.ico-after.view.blue:after{background-position:0 -2840px}.ico-after.online.light:after,.ico-after.view.light:after{background-position:0 -2880px}.ico-after.edit:after{background-position:0 -1120px}.ico-after.edit.blue:after{background-position:0 -1040px}.ico-after.edit.light:after{background-position:0 -1080px}.ico-after.alert:after{background-position:0 -80px}.ico-after.alert.blue:after{background-position:0 0}.ico-after.alert.light:after{background-position:0 -40px}.ico-after.cite:after{background-position:0 -680px}.ico-after.cite.blue:after{background-position:0 -600px}.ico-after.cite.light:after{background-position:0 -640px}.ico-after.tick:after{background-position:0 -2760px}.ico-after.tick.green:after{background-position:0 -2680px}.ico-after.tick.light:after{background-position:0 -2720px}.ico-after.upvote:after{background-position:0 -2640px}.ico-after.upvote.voted:after{background-position:0 -2600px}.ico-after.downvote:after{background-position:0 -2560px}.ico-after.downvote.voted:after{background-position:0 -2520px}.ico-after.lock:after{background-position:0 -1600px}.ico-after.lock.blue:after{background-position:0 -1520px}.ico-after.lock.light:after{background-position:0 -1560px}.ico-after.more:after{background-position:0 -1800px}.ico-after.more.blue:after{background-position:0 -1720px}.ico-after.more.light:after{background-position:0 -1760px}.ico-after.cross:after{background-position:0 -880px}.ico-after.cross.blue:after{background-position:0 -720px}.ico-after.cross.red:after{background-position:0 -800px}.ico-after.cross.light:after{background-position:0 -760px}.ico-after.cross.white:after{background-position:0 -840px}.ico-after.pin:after{background-position:0 -2080px}.ico-after.pin.blue:after{background-position:0 
-2000px}.ico-after.pin.light:after{background-position:0 -2040px}.ico-after.beta:after{background-position:0 -560px}.ico-after.beta.blue:after{background-position:0 -480px}.ico-after.beta.light:after{background-position:0 -520px}.ico-after.offline:after,.ico-after.arrow-right:after{background-position:0 -400px}.ico-after.offline.blue:after,.ico-after.arrow-right.blue:after{background-position:0 -320px}.ico-after.offline.light:after,.ico-after.arrow-right.light:after{background-position:0 -360px}.ico-after.arrow-left:after{background-position:0 -280px}.ico-after.arrow-left.blue:after{background-position:0 -200px}.ico-after.arrow-left.light:after{background-position:0 -240px}.ico-after.move:after{background-position:0 -1920px}.ico-after.move.blue:after{background-position:0 -1840px}.ico-after.move.light:after{background-position:0 -1880px}.ico-after.star:after{background-position:0 -2480px}.ico-after.star.yellow:after{background-position:0 -2440px}.ico-after.star.blue:after{background-position:0 -2360px}.ico-after.star.light:after{background-position:0 -2400px}.ico-after.download:after{background-position:0 -1000px}.ico-after.download.blue:after{background-position:0 -920px}.ico-after.download.light:after{background-position:0 -960px}.ico-after.import:after{background-position:0 -1440px}.ico-after.import.blue:after{background-position:0 -1360px}.ico-after.import.light:after{background-position:0 -1400px}.ico-after.history:after{background-position:0 -1320px}.ico-after.history.blue:after{background-position:0 -1240px}.ico-after.history.light:after{background-position:0 -1280px}.ico-after.rss:after{background-position:0 -2280px}.ico-after.rss.blue:after{background-position:0 -2160px}.ico-after.rss.orange:after{background-position:0 -2240px}.ico-after.rss.light:after{background-position:0 -2200px}.codehilite .hll{background-color:#ffc}.codehilite{background:#f8f8f8}.codehilite .c{color:#408080;font-style:italic}.codehilite .err{border:1px solid red}.codehilite .k{color:#008000;font-weight:bold}.codehilite .o{color:#666}.codehilite .cm{color:#408080;font-style:italic}.codehilite .cp{color:#bc7a00}.codehilite .c1{color:#408080;font-style:italic}.codehilite .cs{color:#408080;font-style:italic}.codehilite .gd{color:#a00000}.codehilite .ge{font-style:italic}.codehilite .gr{color:red}.codehilite .gh{color:#000080;font-weight:bold}.codehilite .gi{color:#00a000}.codehilite .go{color:gray}.codehilite .gp{color:#000080;font-weight:bold}.codehilite .gs{font-weight:bold}.codehilite .gu{color:#800080;font-weight:bold}.codehilite .gt{color:#0040d0}.codehilite .kc{color:#008000;font-weight:bold}.codehilite .kd{color:#008000;font-weight:bold}.codehilite .kn{color:#008000;font-weight:bold}.codehilite .kp{color:green}.codehilite .kr{color:#008000;font-weight:bold}.codehilite .kt{color:#b00040}.codehilite .m{color:#666}.codehilite .s{color:#ba2121}.codehilite .na{color:#7d9029}.codehilite .nb{color:green}.codehilite .nc{color:#0000FF;font-weight:bold}.codehilite .no{color:#800}.codehilite .nd{color:#a2f}.codehilite .ni{color:#999999;font-weight:bold}.codehilite .ne{color:#D2413A;font-weight:bold}.codehilite .nf{color:blue}.codehilite .nl{color:#a0a000}.codehilite .nn{color:#0000FF;font-weight:bold}.codehilite .nt{color:#008000;font-weight:bold}.codehilite .nv{color:#19177c}.codehilite .ow{color:#AA22FF;font-weight:bold}.codehilite .w{color:#bbb}.codehilite .mf{color:#666}.codehilite .mh{color:#666}.codehilite .mi{color:#666}.codehilite .mo{color:#666}.codehilite .sb{color:#ba2121}.codehilite 
.sc{color:#ba2121}.codehilite .sd{color:#BA2121;font-style:italic}.codehilite .s2{color:#ba2121}.codehilite .se{color:#BB6622;font-weight:bold}.codehilite .sh{color:#ba2121}.codehilite .si{color:#BB6688;font-weight:bold}.codehilite .sx{color:green}.codehilite .sr{color:#b68}.codehilite .s1{color:#ba2121}.codehilite .ss{color:#19177c}.codehilite .bp{color:green}.codehilite .vc{color:#19177c}.codehilite .vg{color:#19177c}.codehilite .vi{color:#19177c}.codehilite .il{color:#666}.codehilitetable{width:100% !important;table-layout:fixed;border-color:rgba(0,0,0,0.15)}.codehilitetable td{padding:0}.codehilitetable .linenos{background-color:#fbfbfc;border-right:1px solid #ececf0;width:46px}.codehilitetable .codehilite,.codehilitetable .linenos{padding-top:15px;padding-bottom:15px}.codehilitetable .linenodiv pre{text-align:right;padding-right:emCalc(6px);color:#bebec5}.codehilitetable .codehilite pre{padding-left:emCalc(6px)}.codehilitetable .codehilite{width:100%;height:auto;overflow:auto}.codehilitetable .codehilite pre{white-space:pre;overflow:auto;overflow:auto}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.dropdown{display:none !important}}
diff --git a/assets/css/scss/_all-supports.scss b/assets/css/scss/_all-supports.scss
index cab7f78d3f..0fd0bd4083 100644
--- a/assets/css/scss/_all-supports.scss
+++ b/assets/css/scss/_all-supports.scss
@@ -1859,6 +1859,42 @@ table {
}
}
+ .markdown-help {
+ .open-markdown-help {
+ display: block;
+ position: absolute;
+ bottom: 0;
+ left: 8px;
+
+ .close-markdown-help-text {
+ display: none;
+ }
+ }
+
+ .markdown-help-more {
+ display: none;
+ background: #EEE;
+ padding: 15px;
+ margin-bottom: 5px;
+
+ pre {
+ margin: 0;
+ }
+
+ &.show-markdown-help {
+ display: block;
+ }
+ }
+ .show-markdown-help + .open-markdown-help {
+ .close-markdown-help-text {
+ display: inline;
+ }
+ .open-markdown-help-text {
+ display: none;
+ }
+ }
+ }
+
.message-bottom {
@include display(flex);
@include align-items(flex-start);
diff --git a/assets/js/custom/find-solved-topics.js b/assets/js/custom/find-solved-topics.js
new file mode 100644
index 0000000000..b19ef9d605
--- /dev/null
+++ b/assets/js/custom/find-solved-topics.js
@@ -0,0 +1,11 @@
+/* ===== Zeste de Savoir ====================================================
+ Author: Alex-D / Alexandre Demode
+ ---------------------------------
+ Search for solved topics when creating a new topic
+ ========================================================================== */
+
+var $solvedTopicsElem = $('main [data-solved-topics-url]');
+if($solvedTopicsElem.length > 0){
+ var solvedTopicsUrl = $solvedTopicsElem.attr('data-solved-topics-url');
+ // TODO: back end is down, cannot implement this for now
+}
\ No newline at end of file
diff --git a/assets/js/custom/karma-ajax.js b/assets/js/custom/karma-ajax.js
new file mode 100644
index 0000000000..86b172ee36
--- /dev/null
+++ b/assets/js/custom/karma-ajax.js
@@ -0,0 +1,28 @@
+/* ===== Zeste de Savoir ====================================================
+ Author: Alex-D / Alexandre Demode
+ ---------------------------------
+ Manage karma AJAX requests (+1/-1 on messages)
+ ========================================================================== */
+
+$('.upvote, .downvote').click(function(e){
+ var $thumb = $(this),
+ $karma = $thumb.parents('.message-karma:first'),
+ $otherThumb = $thumb.hasClass('downvote')
+ ? $karma.children('.upvote')
+ : $karma.children('.downvote');
+
+ $.ajax({
+ url: $thumb.attr('href'),
+ type: 'GET', // TODO : use POST method (CSRF in GET)
+ dataType: 'json',
+ success: function(data){
+ $karma.children('.upvote').text("+" + data.upvotes);
+ $karma.children('.downvote').text("-" + data.downvotes);
+ $thumb.toggleClass('voted');
+ $otherThumb.removeClass('voted');
+ }
+ });
+
+ e.stopPropagation();
+ e.preventDefault();
+});
\ No newline at end of file
diff --git a/assets/js/custom/markdown-help.js b/assets/js/custom/markdown-help.js
new file mode 100644
index 0000000000..11d3063cd6
--- /dev/null
+++ b/assets/js/custom/markdown-help.js
@@ -0,0 +1,25 @@
+/* ===== Zeste de Savoir ====================================================
+ Author: Alex-D / Alexandre Demode
+ ---------------------------------
+ Ugly markdown help block management
+ TEMP : Add this to the future awesome Markdown editor directly
+ ========================================================================== */
+
+$('.md-editor').each(function(){
+ var $help = $('<div/>', {
+ 'class': 'markdown-help',
+ 'html': '<div class="markdown-help-more">' +
+ '<pre><code>**gras** \n*italique* \n \n> citation \n+ liste a puces </code></pre>' +
+ '<a href="URL_A_METTRE">Voir la documentation complète</a></div>' +
+ '<a href="#open-markdown-help" class="open-markdown-help btn btn-grey ico-after view">'+
+ '<span class="close-markdown-help-text">Masquer</span>' +
+ '<span class="open-markdown-help-text">Afficher</span> l\'aide Markdown' +
+ '</a>'
+ });
+ $(this).after($help);
+ $('.open-markdown-help, .close-markdown-help', $help).click(function(e){
+ $('.markdown-help-more', $help).toggleClass('show-markdown-help');
+ e.preventDefault();
+ e.stopPropagation();
+ });
+});
\ No newline at end of file
diff --git a/templates/forum/topic/new.html b/templates/forum/topic/new.html
index bf3e8540c0..49efc26a3c 100644
--- a/templates/forum/topic/new.html
+++ b/templates/forum/topic/new.html
@@ -25,7 +25,9 @@
{% block content %}
- {% crispy form %}
+ <div data-solved-topics-url="{% url "zds.forum.views.complete_topic" %}">
+ {% crispy form %}
+ </div>
{% if text %}
{% include "misc/previsualization.part.html" %}
diff --git a/templates/tutorial/tutorial/view.html b/templates/tutorial/tutorial/view.html
index 8a1969da42..66c915d754 100644
--- a/templates/tutorial/tutorial/view.html
+++ b/templates/tutorial/tutorial/view.html
@@ -173,18 +173,37 @@ <h3>Validation</h3>
{% endif %}
{% if tutorial.in_validation %}
- <li>
- <a href="#valid-publish" class="open-modal ico-after tick green">Valider et publier</a>
- <div class="modal modal-small" id="valid-publish">
- {% crispy formValid %}
- </div>
- </li>
- <li>
- <a href="#reject" class="open-modal ico-after cross red">Rejeter</a>
- <div class="modal modal-small" id="reject">
- {% crispy formReject %}
- </div>
- </li>
+ {% if validation.is_pending %}
+ <li>
+ <a href="{% url "zds.tutorial.views.reservation" validation.pk %}" class="ico-after lock blue">Réserver</a>
+ </li>
+ {% elif validation.is_pending_valid %}
+ {% if validation.validator == user %}
+ <li>
+ <a href="{% url "zds.tutorial.views.reservation" validation.pk %}" class="open-modal ico-after lock blue">
+ Se retirer
+ </a>
+ </li>
+ <li>
+ <a href="#valid-publish" class="open-modal ico-after tick green">Valider et publier</a>
+ <div class="modal modal-small" id="valid-publish">
+ {% crispy formValid %}
+ </div>
+ </li>
+ <li>
+ <a href="#reject" class="open-modal ico-after cross red">Rejeter</a>
+ <div class="modal modal-small" id="reject">
+ {% crispy formReject %}
+ </div>
+ </li>
+ {% else %}
+ <li>
+ <a href="{% url "zds.tutorial.views.reservation" validation.pk %}" class="open-modal ico-after lock blue">
+ Réservé par {{ validation.validator.username }}, le retirer
+ </a>
+ </li>
+ {% endif %}
+ {% endif %}
{% endif %}
</ul>
</div>
diff --git a/zds/settings.py b/zds/settings.py
index a8c154c47e..98798cc7ff 100644
--- a/zds/settings.py
+++ b/zds/settings.py
@@ -129,6 +129,8 @@
'js/custom/keyboard-navigation.js',
'js/custom/message-hidden.js',
'js/custom/spoiler.js',
+ 'js/custom/karma-ajax.js',
+ 'js/custom/markdown-help.js',
),
'output_filename': 'js/main.js'
}
|
geopandas__geopandas-2172 | Empty overlay intersection causes error
The new implementation of ``overlay`` generates an error if the intersection is empty. Here is a reproducible code:
```python
import geopandas as gpd
from geopandas.tools import overlay
from shapely.geometry import Polygon
polys1 = gpd.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),Polygon([(2,2), (4,2), (4,4), (2,4)])])
polys2 = gpd.GeoSeries([Polygon([(-1,-1), (-3,-1), (-3,-3), (-1,-3)]),Polygon([(-3,-3), (-5,-3), (-5,-5), (-3,-5)])])
df1 = gpd.GeoDataFrame({'geometry': polys1, 'df1':[1,2]}, crs={'init': 'epsg:4326', 'no_defs': True})
df2 = gpd.GeoDataFrame({'geometry': polys2, 'df2':[1,2]}, crs={'init': 'epsg:4326', 'no_defs': True})
gpd.tools.overlay(df1, df2, 'intersection')
```
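Instead of raising, the call above should come back with an empty result that still carries the attribute columns of both inputs. A minimal sketch of that expectation (the column names are taken from the frames built above; nothing here beyond the plain constructor is meant as geopandas API):
```python
import geopandas as gpd

# What a successful call on non-intersecting inputs should look like:
# zero rows, but the combined schema of df1 and df2 plus the geometry column.
expected = gpd.GeoDataFrame(columns=['df1', 'df2', 'geometry'])
assert len(expected) == 0
```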
I will try to provide a PR tonight.
| [
{
"content": "import warnings\nfrom functools import reduce\n\nimport numpy as np\nimport pandas as pd\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef _ensure_geometry_column(df):\n \"\"\"\n Helper function to ensure the geometry column is called 'geometry'.\n If another column with that name exists, it will be dropped.\n \"\"\"\n if not df._geometry_column_name == \"geometry\":\n if \"geometry\" in df.columns:\n df.drop(\"geometry\", axis=1, inplace=True)\n df.rename(\n columns={df._geometry_column_name: \"geometry\"}, copy=False, inplace=True\n )\n df.set_geometry(\"geometry\", inplace=True)\n\n\ndef _overlay_intersection(df1, df2):\n \"\"\"\n Overlay Intersection operation used in overlay function\n \"\"\"\n # Spatial Index to create intersections\n idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate=\"intersects\", sort=True)\n # Create pairs of geometries in both dataframes to be intersected\n if idx1.size > 0 and idx2.size > 0:\n left = df1.geometry.take(idx1)\n left.reset_index(drop=True, inplace=True)\n right = df2.geometry.take(idx2)\n right.reset_index(drop=True, inplace=True)\n intersections = left.intersection(right)\n poly_ix = intersections.type.isin([\"Polygon\", \"MultiPolygon\"])\n intersections.loc[poly_ix] = intersections[poly_ix].buffer(0)\n\n # only keep actual intersecting geometries\n pairs_intersect = pd.DataFrame({\"__idx1\": idx1, \"__idx2\": idx2})\n geom_intersect = intersections\n\n # merge data for intersecting geometries\n df1 = df1.reset_index(drop=True)\n df2 = df2.reset_index(drop=True)\n dfinter = pairs_intersect.merge(\n df1.drop(df1._geometry_column_name, axis=1),\n left_on=\"__idx1\",\n right_index=True,\n )\n dfinter = dfinter.merge(\n df2.drop(df2._geometry_column_name, axis=1),\n left_on=\"__idx2\",\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n\n return GeoDataFrame(dfinter, geometry=geom_intersect, crs=df1.crs)\n else:\n result = df1.iloc[:0].merge(\n df2.iloc[:0].drop(df2.geometry.name, axis=1),\n left_index=True,\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n return result[\n result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]\n ]\n\n\ndef _overlay_difference(df1, df2):\n \"\"\"\n Overlay Difference operation used in overlay function\n \"\"\"\n # spatial index query to find intersections\n idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate=\"intersects\", sort=True)\n idx1_unique, idx1_unique_indices = np.unique(idx1, return_index=True)\n idx2_split = np.split(idx2, idx1_unique_indices[1:])\n sidx = [\n idx2_split.pop(0) if idx in idx1_unique else []\n for idx in range(df1.geometry.size)\n ]\n # Create differences\n new_g = []\n for geom, neighbours in zip(df1.geometry, sidx):\n new = reduce(\n lambda x, y: x.difference(y), [geom] + list(df2.geometry.iloc[neighbours])\n )\n new_g.append(new)\n differences = GeoSeries(new_g, index=df1.index, crs=df1.crs)\n poly_ix = differences.type.isin([\"Polygon\", \"MultiPolygon\"])\n differences.loc[poly_ix] = differences[poly_ix].buffer(0)\n geom_diff = differences[~differences.is_empty].copy()\n dfdiff = df1[~differences.is_empty].copy()\n dfdiff[dfdiff._geometry_column_name] = geom_diff\n return dfdiff\n\n\ndef _overlay_symmetric_diff(df1, df2):\n \"\"\"\n Overlay Symmetric Difference operation used in overlay function\n \"\"\"\n dfdiff1 = _overlay_difference(df1, df2)\n dfdiff2 = _overlay_difference(df2, df1)\n dfdiff1[\"__idx1\"] = range(len(dfdiff1))\n dfdiff2[\"__idx2\"] = 
range(len(dfdiff2))\n dfdiff1[\"__idx2\"] = np.nan\n dfdiff2[\"__idx1\"] = np.nan\n # ensure geometry name (otherwise merge goes wrong)\n _ensure_geometry_column(dfdiff1)\n _ensure_geometry_column(dfdiff2)\n # combine both 'difference' dataframes\n dfsym = dfdiff1.merge(\n dfdiff2, on=[\"__idx1\", \"__idx2\"], how=\"outer\", suffixes=(\"_1\", \"_2\")\n )\n geometry = dfsym.geometry_1.copy()\n geometry.name = \"geometry\"\n # https://github.com/pandas-dev/pandas/issues/26468 use loc for now\n geometry.loc[dfsym.geometry_1.isnull()] = dfsym.loc[\n dfsym.geometry_1.isnull(), \"geometry_2\"\n ]\n dfsym.drop([\"geometry_1\", \"geometry_2\"], axis=1, inplace=True)\n dfsym.reset_index(drop=True, inplace=True)\n dfsym = GeoDataFrame(dfsym, geometry=geometry, crs=df1.crs)\n return dfsym\n\n\ndef _overlay_union(df1, df2):\n \"\"\"\n Overlay Union operation used in overlay function\n \"\"\"\n dfinter = _overlay_intersection(df1, df2)\n dfsym = _overlay_symmetric_diff(df1, df2)\n dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)\n # keep geometry column last\n columns = list(dfunion.columns)\n columns.remove(\"geometry\")\n columns = columns + [\"geometry\"]\n return dfunion.reindex(columns=columns)\n\n\ndef overlay(df1, df2, how=\"intersection\", keep_geom_type=None, make_valid=True):\n \"\"\"Perform spatial overlay between two GeoDataFrames.\n\n Currently only supports data GeoDataFrames with uniform geometry types,\n i.e. containing only (Multi)Polygons, or only (Multi)Points, or a\n combination of (Multi)LineString and LinearRing shapes.\n Implements several methods that are all effectively subsets of the union.\n\n See the User Guide page :doc:`../../user_guide/set_operations` for details.\n\n Parameters\n ----------\n df1 : GeoDataFrame\n df2 : GeoDataFrame\n how : string\n Method of spatial overlay: 'intersection', 'union',\n 'identity', 'symmetric_difference' or 'difference'.\n keep_geom_type : bool\n If True, return only geometries of the same geometry type as df1 has,\n if False, return all resulting geometries. Default is None,\n which will set keep_geom_type to True but warn upon dropping\n geometries.\n make_valid : bool, default True\n If True, any invalid input geometries are corrected with a call to `buffer(0)`,\n if False, a `ValueError` is raised if any input geometries are invalid.\n\n Returns\n -------\n df : GeoDataFrame\n GeoDataFrame with new set of polygons and attributes\n resulting from the overlay\n\n Examples\n --------\n >>> from shapely.geometry import Polygon\n >>> polys1 = geopandas.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),\n ... Polygon([(2,2), (4,2), (4,4), (2,4)])])\n >>> polys2 = geopandas.GeoSeries([Polygon([(1,1), (3,1), (3,3), (1,3)]),\n ... 
Polygon([(3,3), (5,3), (5,5), (3,5)])])\n >>> df1 = geopandas.GeoDataFrame({'geometry': polys1, 'df1_data':[1,2]})\n >>> df2 = geopandas.GeoDataFrame({'geometry': polys2, 'df2_data':[1,2]})\n\n >>> geopandas.overlay(df1, df2, how='union')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 5 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 6 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> geopandas.overlay(df1, df2, how='intersection')\n df1_data df2_data geometry\n 0 1 1 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2 1 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2 2 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n\n >>> geopandas.overlay(df1, df2, how='symmetric_difference')\n df1_data df2_data geometry\n 0 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 1 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 2 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 3 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> geopandas.overlay(df1, df2, how='difference')\n geometry df1_data\n 0 POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0.... 1\n 1 MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000... 2\n\n >>> geopandas.overlay(df1, df2, how='identity')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n\n See also\n --------\n sjoin : spatial join\n GeoDataFrame.overlay : equivalent method\n\n Notes\n ------\n Every operation in GeoPandas is planar, i.e. 
the potential third\n dimension is not taken into account.\n \"\"\"\n # Allowed operations\n allowed_hows = [\n \"intersection\",\n \"union\",\n \"identity\",\n \"symmetric_difference\",\n \"difference\", # aka erase\n ]\n # Error Messages\n if how not in allowed_hows:\n raise ValueError(\n \"`how` was '{0}' but is expected to be in {1}\".format(how, allowed_hows)\n )\n\n if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):\n raise NotImplementedError(\n \"overlay currently only implemented for \" \"GeoDataFrames\"\n )\n\n if not _check_crs(df1, df2):\n _crs_mismatch_warn(df1, df2, stacklevel=3)\n\n if keep_geom_type is None:\n keep_geom_type = True\n keep_geom_type_warning = True\n else:\n keep_geom_type_warning = False\n\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n for i, df in enumerate([df1, df2]):\n poly_check = df.geom_type.isin(polys).any()\n lines_check = df.geom_type.isin(lines).any()\n points_check = df.geom_type.isin(points).any()\n if sum([poly_check, lines_check, points_check]) > 1:\n raise NotImplementedError(\n \"df{} contains mixed geometry types.\".format(i + 1)\n )\n\n if how == \"intersection\":\n box_gdf1 = df1.total_bounds\n box_gdf2 = df2.total_bounds\n\n if not (\n ((box_gdf1[0] <= box_gdf2[2]) and (box_gdf2[0] <= box_gdf1[2]))\n and ((box_gdf1[1] <= box_gdf2[3]) and (box_gdf2[1] <= box_gdf1[3]))\n ):\n result = df1.iloc[:0].merge(\n df2.iloc[:0].drop(df2.geometry.name, axis=1),\n left_index=True,\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n return result[\n result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]\n ]\n\n # Computations\n def _make_valid(df):\n df = df.copy()\n if df.geom_type.isin(polys).all():\n mask = ~df.geometry.is_valid\n col = df._geometry_column_name\n if make_valid:\n df.loc[mask, col] = df.loc[mask, col].buffer(0)\n elif mask.any():\n raise ValueError(\n \"You have passed make_valid=False along with \"\n f\"{mask.sum()} invalid input geometries. \"\n \"Use make_valid=True or make sure that all geometries \"\n \"are valid before using overlay.\"\n )\n return df\n\n df1 = _make_valid(df1)\n df2 = _make_valid(df2)\n\n with warnings.catch_warnings(): # CRS checked above, suppress array-level warning\n warnings.filterwarnings(\"ignore\", message=\"CRS mismatch between the CRS\")\n if how == \"difference\":\n return _overlay_difference(df1, df2)\n elif how == \"intersection\":\n result = _overlay_intersection(df1, df2)\n elif how == \"symmetric_difference\":\n result = _overlay_symmetric_diff(df1, df2)\n elif how == \"union\":\n result = _overlay_union(df1, df2)\n elif how == \"identity\":\n dfunion = _overlay_union(df1, df2)\n result = dfunion[dfunion[\"__idx1\"].notnull()].copy()\n\n if keep_geom_type:\n geom_type = df1.geom_type.iloc[0]\n\n # First we filter the geometry types inside GeometryCollections objects\n # (e.g. 
GeometryCollection([polygon, point]) -> polygon)\n # we do this separately on only the relevant rows, as this is an expensive\n # operation (an expensive no-op for geometry types other than collections)\n is_collection = result.geom_type == \"GeometryCollection\"\n if is_collection.any():\n geom_col = result._geometry_column_name\n collections = result[[geom_col]][is_collection]\n\n exploded = collections.reset_index(drop=True).explode(index_parts=True)\n exploded = exploded.reset_index(level=0)\n\n orig_num_geoms_exploded = exploded.shape[0]\n if geom_type in polys:\n exploded = exploded.loc[exploded.geom_type.isin(polys)]\n elif geom_type in lines:\n exploded = exploded.loc[exploded.geom_type.isin(lines)]\n elif geom_type in points:\n exploded = exploded.loc[exploded.geom_type.isin(points)]\n else:\n raise TypeError(\n \"`keep_geom_type` does not support {}.\".format(geom_type)\n )\n num_dropped_collection = orig_num_geoms_exploded - exploded.shape[0]\n\n # level_0 created with above reset_index operation\n # and represents the original geometry collections\n # TODO avoiding dissolve to call unary_union in this case could further\n # improve performance (we only need to collect geometries in their\n # respective Multi version)\n dissolved = exploded.dissolve(by=\"level_0\")\n result.loc[is_collection, geom_col] = dissolved[geom_col].values\n else:\n num_dropped_collection = 0\n\n # Now we filter all geometries (in theory we don't need to do this\n # again for the rows handled above for GeometryCollections, but filtering\n # them out is probably more expensive as simply including them when this\n # is typically about only a few rows)\n orig_num_geoms = result.shape[0]\n if geom_type in polys:\n result = result.loc[result.geom_type.isin(polys)]\n elif geom_type in lines:\n result = result.loc[result.geom_type.isin(lines)]\n elif geom_type in points:\n result = result.loc[result.geom_type.isin(points)]\n else:\n raise TypeError(\"`keep_geom_type` does not support {}.\".format(geom_type))\n num_dropped = orig_num_geoms - result.shape[0]\n\n if (num_dropped > 0 or num_dropped_collection > 0) and keep_geom_type_warning:\n warnings.warn(\n \"`keep_geom_type=True` in overlay resulted in {} dropped \"\n \"geometries of different geometry types than df1 has. \"\n \"Set `keep_geom_type=False` to retain all \"\n \"geometries\".format(num_dropped + num_dropped_collection),\n UserWarning,\n stacklevel=2,\n )\n\n result.reset_index(drop=True, inplace=True)\n result.drop([\"__idx1\", \"__idx2\"], axis=1, inplace=True)\n return result\n",
"path": "geopandas/tools/overlay.py"
}
] | [
{
"content": "import warnings\nfrom functools import reduce\n\nimport numpy as np\nimport pandas as pd\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef _ensure_geometry_column(df):\n \"\"\"\n Helper function to ensure the geometry column is called 'geometry'.\n If another column with that name exists, it will be dropped.\n \"\"\"\n if not df._geometry_column_name == \"geometry\":\n if \"geometry\" in df.columns:\n df.drop(\"geometry\", axis=1, inplace=True)\n df.rename(\n columns={df._geometry_column_name: \"geometry\"}, copy=False, inplace=True\n )\n df.set_geometry(\"geometry\", inplace=True)\n\n\ndef _overlay_intersection(df1, df2):\n \"\"\"\n Overlay Intersection operation used in overlay function\n \"\"\"\n # Spatial Index to create intersections\n idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate=\"intersects\", sort=True)\n # Create pairs of geometries in both dataframes to be intersected\n if idx1.size > 0 and idx2.size > 0:\n left = df1.geometry.take(idx1)\n left.reset_index(drop=True, inplace=True)\n right = df2.geometry.take(idx2)\n right.reset_index(drop=True, inplace=True)\n intersections = left.intersection(right)\n poly_ix = intersections.type.isin([\"Polygon\", \"MultiPolygon\"])\n intersections.loc[poly_ix] = intersections[poly_ix].buffer(0)\n\n # only keep actual intersecting geometries\n pairs_intersect = pd.DataFrame({\"__idx1\": idx1, \"__idx2\": idx2})\n geom_intersect = intersections\n\n # merge data for intersecting geometries\n df1 = df1.reset_index(drop=True)\n df2 = df2.reset_index(drop=True)\n dfinter = pairs_intersect.merge(\n df1.drop(df1._geometry_column_name, axis=1),\n left_on=\"__idx1\",\n right_index=True,\n )\n dfinter = dfinter.merge(\n df2.drop(df2._geometry_column_name, axis=1),\n left_on=\"__idx2\",\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n\n return GeoDataFrame(dfinter, geometry=geom_intersect, crs=df1.crs)\n else:\n result = df1.iloc[:0].merge(\n df2.iloc[:0].drop(df2.geometry.name, axis=1),\n left_index=True,\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n result[\"__idx1\"] = None\n result[\"__idx2\"] = None\n return result[\n result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]\n ]\n\n\ndef _overlay_difference(df1, df2):\n \"\"\"\n Overlay Difference operation used in overlay function\n \"\"\"\n # spatial index query to find intersections\n idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate=\"intersects\", sort=True)\n idx1_unique, idx1_unique_indices = np.unique(idx1, return_index=True)\n idx2_split = np.split(idx2, idx1_unique_indices[1:])\n sidx = [\n idx2_split.pop(0) if idx in idx1_unique else []\n for idx in range(df1.geometry.size)\n ]\n # Create differences\n new_g = []\n for geom, neighbours in zip(df1.geometry, sidx):\n new = reduce(\n lambda x, y: x.difference(y), [geom] + list(df2.geometry.iloc[neighbours])\n )\n new_g.append(new)\n differences = GeoSeries(new_g, index=df1.index, crs=df1.crs)\n poly_ix = differences.type.isin([\"Polygon\", \"MultiPolygon\"])\n differences.loc[poly_ix] = differences[poly_ix].buffer(0)\n geom_diff = differences[~differences.is_empty].copy()\n dfdiff = df1[~differences.is_empty].copy()\n dfdiff[dfdiff._geometry_column_name] = geom_diff\n return dfdiff\n\n\ndef _overlay_symmetric_diff(df1, df2):\n \"\"\"\n Overlay Symmetric Difference operation used in overlay function\n \"\"\"\n dfdiff1 = _overlay_difference(df1, df2)\n dfdiff2 = _overlay_difference(df2, df1)\n dfdiff1[\"__idx1\"] = 
range(len(dfdiff1))\n dfdiff2[\"__idx2\"] = range(len(dfdiff2))\n dfdiff1[\"__idx2\"] = np.nan\n dfdiff2[\"__idx1\"] = np.nan\n # ensure geometry name (otherwise merge goes wrong)\n _ensure_geometry_column(dfdiff1)\n _ensure_geometry_column(dfdiff2)\n # combine both 'difference' dataframes\n dfsym = dfdiff1.merge(\n dfdiff2, on=[\"__idx1\", \"__idx2\"], how=\"outer\", suffixes=(\"_1\", \"_2\")\n )\n geometry = dfsym.geometry_1.copy()\n geometry.name = \"geometry\"\n # https://github.com/pandas-dev/pandas/issues/26468 use loc for now\n geometry.loc[dfsym.geometry_1.isnull()] = dfsym.loc[\n dfsym.geometry_1.isnull(), \"geometry_2\"\n ]\n dfsym.drop([\"geometry_1\", \"geometry_2\"], axis=1, inplace=True)\n dfsym.reset_index(drop=True, inplace=True)\n dfsym = GeoDataFrame(dfsym, geometry=geometry, crs=df1.crs)\n return dfsym\n\n\ndef _overlay_union(df1, df2):\n \"\"\"\n Overlay Union operation used in overlay function\n \"\"\"\n dfinter = _overlay_intersection(df1, df2)\n dfsym = _overlay_symmetric_diff(df1, df2)\n dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)\n # keep geometry column last\n columns = list(dfunion.columns)\n columns.remove(\"geometry\")\n columns = columns + [\"geometry\"]\n return dfunion.reindex(columns=columns)\n\n\ndef overlay(df1, df2, how=\"intersection\", keep_geom_type=None, make_valid=True):\n \"\"\"Perform spatial overlay between two GeoDataFrames.\n\n Currently only supports data GeoDataFrames with uniform geometry types,\n i.e. containing only (Multi)Polygons, or only (Multi)Points, or a\n combination of (Multi)LineString and LinearRing shapes.\n Implements several methods that are all effectively subsets of the union.\n\n See the User Guide page :doc:`../../user_guide/set_operations` for details.\n\n Parameters\n ----------\n df1 : GeoDataFrame\n df2 : GeoDataFrame\n how : string\n Method of spatial overlay: 'intersection', 'union',\n 'identity', 'symmetric_difference' or 'difference'.\n keep_geom_type : bool\n If True, return only geometries of the same geometry type as df1 has,\n if False, return all resulting geometries. Default is None,\n which will set keep_geom_type to True but warn upon dropping\n geometries.\n make_valid : bool, default True\n If True, any invalid input geometries are corrected with a call to `buffer(0)`,\n if False, a `ValueError` is raised if any input geometries are invalid.\n\n Returns\n -------\n df : GeoDataFrame\n GeoDataFrame with new set of polygons and attributes\n resulting from the overlay\n\n Examples\n --------\n >>> from shapely.geometry import Polygon\n >>> polys1 = geopandas.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),\n ... Polygon([(2,2), (4,2), (4,4), (2,4)])])\n >>> polys2 = geopandas.GeoSeries([Polygon([(1,1), (3,1), (3,3), (1,3)]),\n ... 
Polygon([(3,3), (5,3), (5,5), (3,5)])])\n >>> df1 = geopandas.GeoDataFrame({'geometry': polys1, 'df1_data':[1,2]})\n >>> df2 = geopandas.GeoDataFrame({'geometry': polys2, 'df2_data':[1,2]})\n\n >>> geopandas.overlay(df1, df2, how='union')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 5 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 6 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> geopandas.overlay(df1, df2, how='intersection')\n df1_data df2_data geometry\n 0 1 1 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2 1 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2 2 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n\n >>> geopandas.overlay(df1, df2, how='symmetric_difference')\n df1_data df2_data geometry\n 0 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 1 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 2 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 3 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> geopandas.overlay(df1, df2, how='difference')\n geometry df1_data\n 0 POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0.... 1\n 1 MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000... 2\n\n >>> geopandas.overlay(df1, df2, how='identity')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n\n See also\n --------\n sjoin : spatial join\n GeoDataFrame.overlay : equivalent method\n\n Notes\n ------\n Every operation in GeoPandas is planar, i.e. 
the potential third\n dimension is not taken into account.\n \"\"\"\n # Allowed operations\n allowed_hows = [\n \"intersection\",\n \"union\",\n \"identity\",\n \"symmetric_difference\",\n \"difference\", # aka erase\n ]\n # Error Messages\n if how not in allowed_hows:\n raise ValueError(\n \"`how` was '{0}' but is expected to be in {1}\".format(how, allowed_hows)\n )\n\n if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):\n raise NotImplementedError(\n \"overlay currently only implemented for \" \"GeoDataFrames\"\n )\n\n if not _check_crs(df1, df2):\n _crs_mismatch_warn(df1, df2, stacklevel=3)\n\n if keep_geom_type is None:\n keep_geom_type = True\n keep_geom_type_warning = True\n else:\n keep_geom_type_warning = False\n\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n for i, df in enumerate([df1, df2]):\n poly_check = df.geom_type.isin(polys).any()\n lines_check = df.geom_type.isin(lines).any()\n points_check = df.geom_type.isin(points).any()\n if sum([poly_check, lines_check, points_check]) > 1:\n raise NotImplementedError(\n \"df{} contains mixed geometry types.\".format(i + 1)\n )\n\n if how == \"intersection\":\n box_gdf1 = df1.total_bounds\n box_gdf2 = df2.total_bounds\n\n if not (\n ((box_gdf1[0] <= box_gdf2[2]) and (box_gdf2[0] <= box_gdf1[2]))\n and ((box_gdf1[1] <= box_gdf2[3]) and (box_gdf2[1] <= box_gdf1[3]))\n ):\n result = df1.iloc[:0].merge(\n df2.iloc[:0].drop(df2.geometry.name, axis=1),\n left_index=True,\n right_index=True,\n suffixes=(\"_1\", \"_2\"),\n )\n return result[\n result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]\n ]\n\n # Computations\n def _make_valid(df):\n df = df.copy()\n if df.geom_type.isin(polys).all():\n mask = ~df.geometry.is_valid\n col = df._geometry_column_name\n if make_valid:\n df.loc[mask, col] = df.loc[mask, col].buffer(0)\n elif mask.any():\n raise ValueError(\n \"You have passed make_valid=False along with \"\n f\"{mask.sum()} invalid input geometries. \"\n \"Use make_valid=True or make sure that all geometries \"\n \"are valid before using overlay.\"\n )\n return df\n\n df1 = _make_valid(df1)\n df2 = _make_valid(df2)\n\n with warnings.catch_warnings(): # CRS checked above, suppress array-level warning\n warnings.filterwarnings(\"ignore\", message=\"CRS mismatch between the CRS\")\n if how == \"difference\":\n return _overlay_difference(df1, df2)\n elif how == \"intersection\":\n result = _overlay_intersection(df1, df2)\n elif how == \"symmetric_difference\":\n result = _overlay_symmetric_diff(df1, df2)\n elif how == \"union\":\n result = _overlay_union(df1, df2)\n elif how == \"identity\":\n dfunion = _overlay_union(df1, df2)\n result = dfunion[dfunion[\"__idx1\"].notnull()].copy()\n\n if keep_geom_type:\n geom_type = df1.geom_type.iloc[0]\n\n # First we filter the geometry types inside GeometryCollections objects\n # (e.g. 
GeometryCollection([polygon, point]) -> polygon)\n # we do this separately on only the relevant rows, as this is an expensive\n # operation (an expensive no-op for geometry types other than collections)\n is_collection = result.geom_type == \"GeometryCollection\"\n if is_collection.any():\n geom_col = result._geometry_column_name\n collections = result[[geom_col]][is_collection]\n\n exploded = collections.reset_index(drop=True).explode(index_parts=True)\n exploded = exploded.reset_index(level=0)\n\n orig_num_geoms_exploded = exploded.shape[0]\n if geom_type in polys:\n exploded = exploded.loc[exploded.geom_type.isin(polys)]\n elif geom_type in lines:\n exploded = exploded.loc[exploded.geom_type.isin(lines)]\n elif geom_type in points:\n exploded = exploded.loc[exploded.geom_type.isin(points)]\n else:\n raise TypeError(\n \"`keep_geom_type` does not support {}.\".format(geom_type)\n )\n num_dropped_collection = orig_num_geoms_exploded - exploded.shape[0]\n\n # level_0 created with above reset_index operation\n # and represents the original geometry collections\n # TODO avoiding dissolve to call unary_union in this case could further\n # improve performance (we only need to collect geometries in their\n # respective Multi version)\n dissolved = exploded.dissolve(by=\"level_0\")\n result.loc[is_collection, geom_col] = dissolved[geom_col].values\n else:\n num_dropped_collection = 0\n\n # Now we filter all geometries (in theory we don't need to do this\n # again for the rows handled above for GeometryCollections, but filtering\n # them out is probably more expensive as simply including them when this\n # is typically about only a few rows)\n orig_num_geoms = result.shape[0]\n if geom_type in polys:\n result = result.loc[result.geom_type.isin(polys)]\n elif geom_type in lines:\n result = result.loc[result.geom_type.isin(lines)]\n elif geom_type in points:\n result = result.loc[result.geom_type.isin(points)]\n else:\n raise TypeError(\"`keep_geom_type` does not support {}.\".format(geom_type))\n num_dropped = orig_num_geoms - result.shape[0]\n\n if (num_dropped > 0 or num_dropped_collection > 0) and keep_geom_type_warning:\n warnings.warn(\n \"`keep_geom_type=True` in overlay resulted in {} dropped \"\n \"geometries of different geometry types than df1 has. \"\n \"Set `keep_geom_type=False` to retain all \"\n \"geometries\".format(num_dropped + num_dropped_collection),\n UserWarning,\n stacklevel=2,\n )\n\n result.reset_index(drop=True, inplace=True)\n result.drop([\"__idx1\", \"__idx2\"], axis=1, inplace=True)\n return result\n",
"path": "geopandas/tools/overlay.py"
}
] | diff --git a/geopandas/tests/test_overlay.py b/geopandas/tests/test_overlay.py
index 1efee5dd2c..058da69b10 100644
--- a/geopandas/tests/test_overlay.py
+++ b/geopandas/tests/test_overlay.py
@@ -723,3 +723,14 @@ def test_non_overlapping(how):
)
assert_geodataframe_equal(result, expected)
+
+
+def test_no_intersection():
+ # overlapping bounds but non-overlapping geometries
+ gs = GeoSeries([Point(x, x).buffer(0.1) for x in range(3)])
+ gdf1 = GeoDataFrame({"foo": ["a", "b", "c"]}, geometry=gs)
+ gdf2 = GeoDataFrame({"bar": ["1", "3", "5"]}, geometry=gs.translate(1))
+
+ expected = GeoDataFrame(columns=["foo", "bar", "geometry"])
+ result = overlay(gdf1, gdf2, how="intersection")
+ assert_geodataframe_equal(result, expected, check_index_type=False)
diff --git a/geopandas/tools/overlay.py b/geopandas/tools/overlay.py
index 7973e2124b..75dcb55097 100644
--- a/geopandas/tools/overlay.py
+++ b/geopandas/tools/overlay.py
@@ -65,6 +65,8 @@ def _overlay_intersection(df1, df2):
right_index=True,
suffixes=("_1", "_2"),
)
+ result["__idx1"] = None
+ result["__idx2"] = None
return result[
result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]
]
|
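A minimal sketch of the case the new `test_no_intersection` covers, assuming geopandas and shapely are installed: two frames whose total bounds overlap but whose geometries do not. With the change above, the early-return path in `_overlay_intersection` also carries the temporary `__idx1`/`__idx2` columns, so downstream handling sees a consistent set of columns and `overlay(..., how="intersection")` can return an empty frame with both attribute columns.

```python
import geopandas
from geopandas import GeoDataFrame, GeoSeries
from shapely.geometry import Point

# Overlapping bounds but non-overlapping geometries (mirrors test_no_intersection).
gs = GeoSeries([Point(x, x).buffer(0.1) for x in range(3)])
gdf1 = GeoDataFrame({"foo": ["a", "b", "c"]}, geometry=gs)
gdf2 = GeoDataFrame({"bar": ["1", "3", "5"]}, geometry=gs.translate(1))

result = geopandas.overlay(gdf1, gdf2, how="intersection")
print(list(result.columns))  # expected: ['foo', 'bar', 'geometry']
print(len(result))           # expected: 0 rows
```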
paperless-ngx__paperless-ngx-1666 | [BUG] Wrong version number string within docker 1.9.1
### Description
After a successful pull and deploy via Docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1, the version string on the paperless-ngx Web-UI is still 1.9.0.

### Steps to reproduce
1. Pull the new version via docker, docker-compose, or portainer from https://ghcr.io/paperless-ngx/paperless-ngx with tag 1.9.1.
2. Access the Web-UI.
3. Log in.
4. Find the version string on the lower left side.
### Webserver logs
_No response_
### Paperless-ngx version
1.9.1
### Host OS
Alpine Linux x86-64
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
_No response_
### Other
_No response_
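Not part of the report, but a minimal sketch of where the stale string comes from: the version is hard-coded in two places, the Python tuple in `src/paperless/version.py` and the `version` field in `src-ui/src/environments/environment.prod.ts` (both visible below), and both were still at 1.9.0 in the 1.9.1 image. The Python side derives its display strings like this:

```python
from typing import Final, Tuple

# The stale constant that shipped inside the 1.9.1 docker image
# (see src/paperless/version.py below).
__version__: Final[Tuple[int, int, int]] = (1, 9, 0)

# Same derivations as in that file: X.Y.Z and X.Y strings.
full_version_str = ".".join(map(str, __version__))       # '1.9.0'
major_minor_str = ".".join(map(str, __version__[:-1]))   # '1.9'

print(full_version_str, major_minor_str)
```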
| [
{
"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n",
"path": "src/paperless/version.py"
}
] | [
{
"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 2)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n",
"path": "src/paperless/version.py"
}
] | diff --git a/src-ui/src/environments/environment.prod.ts b/src-ui/src/environments/environment.prod.ts
index 92f388ec872..48ea3f98822 100644
--- a/src-ui/src/environments/environment.prod.ts
+++ b/src-ui/src/environments/environment.prod.ts
@@ -5,7 +5,7 @@ export const environment = {
apiBaseUrl: document.baseURI + 'api/',
apiVersion: '2',
appTitle: 'Paperless-ngx',
- version: '1.9.0',
+ version: '1.9.2',
webSocketHost: window.location.host,
webSocketProtocol: window.location.protocol == 'https:' ? 'wss:' : 'ws:',
webSocketBaseUrl: base_url.pathname + 'ws/',
diff --git a/src/paperless/version.py b/src/paperless/version.py
index 1642e3f8932..d196c358db2 100644
--- a/src/paperless/version.py
+++ b/src/paperless/version.py
@@ -1,7 +1,7 @@
from typing import Final
from typing import Tuple
-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)
+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)
# Version string like X.Y.Z
__full_version_str__: Final[str] = ".".join(map(str, __version__))
# Version string like X.Y
|
conda__conda-build-389 | MD5 checking argument not passed to update_index
I was happy to see that there was a `-c` argument to `conda index` which forces it to use md5 hashes instead of file modification times. However, it looks like `main_index.py` never passes that argument on to the `update_index()` function, i.e.,
``` python
...
update_index(path, verbose=(not args.quiet), force=args.force)
...
```
should actually be:
``` python
...
update_index(path, verbose=(not args.quiet), force=args.force, check_md5=args.check_md5)
...
```
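
For context, a minimal self-contained sketch of why the forwarded keyword is spelled `args.check_md5`: argparse derives the attribute name from the long option, converting dashes to underscores, so the `store_true` flag `-c`/`--check-md5` lands on the namespace as `check_md5` and still has to be passed on explicitly.

```python
import argparse

p = argparse.ArgumentParser(description="minimal sketch of the conda index flags")
p.add_argument('-c', "--check-md5", action="store_true",
               help="Use MD5 values instead of file modification times")
p.add_argument('-f', "--force", action="store_true", help="force reading all files")

args = p.parse_args(['-c'])
# '--check-md5' becomes the attribute 'check_md5'; nothing forwards it automatically.
print(args.check_md5)  # True
print(args.force)      # False
```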
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport os\nfrom locale import getpreferredencoding\nfrom os.path import abspath\n\nfrom conda.compat import PY3\n\nfrom conda_build.index import update_index\n\n\ndef main():\n p = argparse.ArgumentParser(\n description=\"Update package index metadata files in given directories\")\n\n p.add_argument('dir',\n help='Directory that contains an index to be updated.',\n nargs='*',\n default=[os.getcwd()])\n\n p.add_argument('-c', \"--check-md5\",\n action=\"store_true\",\n help=\"Use MD5 values instead of file modification times for\\\n determining if a package's metadata needs to be \\\n updated.\")\n\n p.add_argument('-f', \"--force\",\n action=\"store_true\",\n help=\"force reading all files\")\n\n p.add_argument('-q', \"--quiet\",\n action=\"store_true\")\n\n args = p.parse_args()\n\n dir_paths = [abspath(path) for path in args.dir]\n # Don't use byte strings in Python 2\n if not PY3:\n dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]\n\n for path in dir_paths:\n update_index(path, verbose=(not args.quiet), force=args.force)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "conda_build/main_index.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport os\nfrom locale import getpreferredencoding\nfrom os.path import abspath\n\nfrom conda.compat import PY3\n\nfrom conda_build.index import update_index\n\n\ndef main():\n p = argparse.ArgumentParser(\n description=\"Update package index metadata files in given directories\")\n\n p.add_argument('dir',\n help='Directory that contains an index to be updated.',\n nargs='*',\n default=[os.getcwd()])\n\n p.add_argument('-c', \"--check-md5\",\n action=\"store_true\",\n help=\"Use MD5 values instead of file modification times for\\\n determining if a package's metadata needs to be \\\n updated.\")\n\n p.add_argument('-f', \"--force\",\n action=\"store_true\",\n help=\"force reading all files\")\n\n p.add_argument('-q', \"--quiet\",\n action=\"store_true\")\n\n args = p.parse_args()\n\n dir_paths = [abspath(path) for path in args.dir]\n # Don't use byte strings in Python 2\n if not PY3:\n dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]\n\n for path in dir_paths:\n update_index(path, verbose=(not args.quiet), force=args.force, check_md5=args.check_md5)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "conda_build/main_index.py"
}
] | diff --git a/conda_build/main_index.py b/conda_build/main_index.py
index e3a9b10c16..47310e8dd6 100644
--- a/conda_build/main_index.py
+++ b/conda_build/main_index.py
@@ -40,7 +40,7 @@ def main():
dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]
for path in dir_paths:
- update_index(path, verbose=(not args.quiet), force=args.force)
+ update_index(path, verbose=(not args.quiet), force=args.force, check_md5=args.check_md5)
if __name__ == '__main__':
|
adamchainz__django-perf-rec-266 | perf.yml files created with executable bit set
I've just spotted that my test.perf.yml files are marked executable in git, which is surprising as they're YAML files (which shouldn't be executable).
As far as I know, none of the developers are using Windows (only a mixture of macOS and Linux), so I'm not expecting this to be a platform thing on our end.
Is this expected?
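A likely explanation, sketched below rather than taken from the report: the file is created with `os.open`, whose default `mode` is `0o777`, so a freshly created record file keeps whatever execute bits survive the process umask (typically `0o755`), and git records the executable bit. The built-in `open()` creates files with `0o666` before the umask, which is why passing `mode=0o666` explicitly (as the diff below does) yields a normal, non-executable data file.

```python
import os
import stat

# Default mode for os.open is 0o777; with a typical umask of 022 the new
# file ends up 0o755, i.e. with the executable bit set.
fd = os.open("default_mode.yml", os.O_RDWR | os.O_CREAT)
os.close(fd)
print(oct(stat.S_IMODE(os.stat("default_mode.yml").st_mode)))   # e.g. 0o755

# Passing mode=0o666 gives the usual data-file permissions after the umask.
fd = os.open("explicit_mode.yml", os.O_RDWR | os.O_CREAT, 0o666)
os.close(fd)
print(oct(stat.S_IMODE(os.stat("explicit_mode.yml").st_mode)))   # e.g. 0o644
```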
| [
{
"content": "import errno\nimport os\n\nimport yaml\nfrom django.core.files import locks\n\n\nclass KVFile:\n def __init__(self, file_name):\n self.file_name = file_name\n self.data = self.load(file_name)\n\n def __len__(self):\n return len(self.data)\n\n LOAD_CACHE = {}\n\n @classmethod\n def load(cls, file_name):\n if file_name not in cls.LOAD_CACHE:\n cls.LOAD_CACHE[file_name] = cls.load_file(file_name)\n return cls.LOAD_CACHE[file_name]\n\n @classmethod\n def load_file(cls, file_name):\n try:\n with open(file_name) as fp:\n locks.lock(fp, locks.LOCK_EX)\n content = fp.read()\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n content = \"{}\"\n else:\n raise\n\n data = yaml.safe_load(content)\n\n if data is None:\n return {}\n elif not isinstance(data, dict):\n raise TypeError(\"YAML content of {} is not a dictionary\".format(file_name))\n\n return data\n\n @classmethod\n def _clear_load_cache(cls):\n # Should really only be used in testing this class\n cls.LOAD_CACHE = {}\n\n def get(self, key, default):\n return self.data.get(key, default)\n\n def set_and_save(self, key, value):\n if self.data.get(key, object()) == value:\n return\n\n fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT)\n with os.fdopen(fd, \"r+\") as fp:\n locks.lock(fd, locks.LOCK_EX)\n\n data = yaml.safe_load(fp)\n if data is None:\n data = {}\n\n self.data[key] = value\n data[key] = value\n\n fp.seek(0)\n yaml.safe_dump(\n data, fp, default_flow_style=False, allow_unicode=True, width=10000\n )\n",
"path": "src/django_perf_rec/yaml.py"
}
] | [
{
"content": "import errno\nimport os\n\nimport yaml\nfrom django.core.files import locks\n\n\nclass KVFile:\n def __init__(self, file_name):\n self.file_name = file_name\n self.data = self.load(file_name)\n\n def __len__(self):\n return len(self.data)\n\n LOAD_CACHE = {}\n\n @classmethod\n def load(cls, file_name):\n if file_name not in cls.LOAD_CACHE:\n cls.LOAD_CACHE[file_name] = cls.load_file(file_name)\n return cls.LOAD_CACHE[file_name]\n\n @classmethod\n def load_file(cls, file_name):\n try:\n with open(file_name) as fp:\n locks.lock(fp, locks.LOCK_EX)\n content = fp.read()\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n content = \"{}\"\n else:\n raise\n\n data = yaml.safe_load(content)\n\n if data is None:\n return {}\n elif not isinstance(data, dict):\n raise TypeError(\"YAML content of {} is not a dictionary\".format(file_name))\n\n return data\n\n @classmethod\n def _clear_load_cache(cls):\n # Should really only be used in testing this class\n cls.LOAD_CACHE = {}\n\n def get(self, key, default):\n return self.data.get(key, default)\n\n def set_and_save(self, key, value):\n if self.data.get(key, object()) == value:\n return\n\n fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT, mode=0o666)\n with os.fdopen(fd, \"r+\") as fp:\n locks.lock(fd, locks.LOCK_EX)\n\n data = yaml.safe_load(fp)\n if data is None:\n data = {}\n\n self.data[key] = value\n data[key] = value\n\n fp.seek(0)\n yaml.safe_dump(\n data, fp, default_flow_style=False, allow_unicode=True, width=10000\n )\n",
"path": "src/django_perf_rec/yaml.py"
}
] | diff --git a/HISTORY.rst b/HISTORY.rst
index d82a49d6..3b783c05 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,6 +1,12 @@
History
=======
+* Create YAML files as non-executable. This will not be applied to existing
+ files, modify their permissions if necessary, or delete and recreate.
+
+ Thanks to Peter Law for the report in `Issue #264
+ <https://github.com/adamchainz/django-perf-rec/issues/264>`__.
+
4.6.0 (2020-05-20)
------------------
diff --git a/src/django_perf_rec/yaml.py b/src/django_perf_rec/yaml.py
index cde60fa5..0740835c 100644
--- a/src/django_perf_rec/yaml.py
+++ b/src/django_perf_rec/yaml.py
@@ -54,7 +54,7 @@ def set_and_save(self, key, value):
if self.data.get(key, object()) == value:
return
- fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT)
+ fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT, mode=0o666)
with os.fdopen(fd, "r+") as fp:
locks.lock(fd, locks.LOCK_EX)
|
encode__httpx-139 | API design question - `Response.url`
Currently our `Response.url` attribute exposes a `URL` instance.
This is a breaking change from the requests API where it just exposes a plain string.
It's feasible that we should instead only be exposing plain string URLs, in order to aim for drop-in replacement API compatibility w/ requests, *and* in order to keep the API surface area low.
Options here are:
* Expose `Response.url` as a URL instance. (Richer information; the URL class is also useful in its own right.)
* Expose `Response.url` as a str. (Better API compat. Lower API surface area to maintain.)
* Expose `Response.url` as a str, and `Response.urlinfo` as a URL instance. (Better API compat. High API surface area.)
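
A small sketch of the trade-off, using the `URL` class from `httpx/models.py` as shown below (the example URL and values are illustrative):

```python
from httpx.models import URL

u = URL("https://example.org/path?q=1")

# Exposing the URL instance keeps the structured accessors available:
print(u.scheme, u.host, u.path, u.query)   # 'https' 'example.org' '/path' 'q=1'
print(str(u))                              # 'https://example.org/path?q=1'

# Exposing only str(u) would match requests' plain-string behaviour, at the
# cost of losing those accessors (or pushing them onto a second attribute).

# The change that eventually landed (see URL.__eq__ in the updated models.py
# below) keeps the URL instance but lets it compare equal to the plain string:
assert u == "https://example.org/path?q=1"
```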
| [
{
"content": "import cgi\nimport email.message\nimport json as jsonlib\nimport typing\nimport urllib.request\nfrom collections.abc import MutableMapping\nfrom http.cookiejar import Cookie, CookieJar\nfrom urllib.parse import parse_qsl, urlencode\n\nimport chardet\nimport rfc3986\n\nfrom .config import USER_AGENT\nfrom .decoders import (\n ACCEPT_ENCODING,\n SUPPORTED_DECODERS,\n Decoder,\n IdentityDecoder,\n MultiDecoder,\n)\nfrom .exceptions import (\n CookieConflict,\n HttpError,\n InvalidURL,\n ResponseClosed,\n ResponseNotRead,\n StreamConsumed,\n)\nfrom .multipart import multipart_encode\nfrom .status_codes import StatusCode\nfrom .utils import (\n guess_json_utf,\n is_known_encoding,\n normalize_header_key,\n normalize_header_value,\n)\n\nURLTypes = typing.Union[\"URL\", str]\n\nQueryParamTypes = typing.Union[\n \"QueryParams\",\n typing.Mapping[str, str],\n typing.List[typing.Tuple[typing.Any, typing.Any]],\n str,\n]\n\nHeaderTypes = typing.Union[\n \"Headers\",\n typing.Dict[typing.AnyStr, typing.AnyStr],\n typing.List[typing.Tuple[typing.AnyStr, typing.AnyStr]],\n]\n\nCookieTypes = typing.Union[\"Cookies\", CookieJar, typing.Dict[str, str]]\n\nAuthTypes = typing.Union[\n typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],\n typing.Callable[[\"AsyncRequest\"], \"AsyncRequest\"],\n]\n\nAsyncRequestData = typing.Union[dict, str, bytes, typing.AsyncIterator[bytes]]\n\nRequestData = typing.Union[dict, str, bytes, typing.Iterator[bytes]]\n\nRequestFiles = typing.Dict[\n str,\n typing.Union[\n typing.IO[typing.AnyStr], # file\n typing.Tuple[str, typing.IO[typing.AnyStr]], # (filename, file)\n typing.Tuple[\n str, typing.IO[typing.AnyStr], str\n ], # (filename, file, content_type)\n ],\n]\n\nAsyncResponseContent = typing.Union[bytes, typing.AsyncIterator[bytes]]\n\nResponseContent = typing.Union[bytes, typing.Iterator[bytes]]\n\n\nclass URL:\n def __init__(\n self,\n url: URLTypes,\n allow_relative: bool = False,\n params: QueryParamTypes = None,\n ) -> None:\n if isinstance(url, rfc3986.uri.URIReference):\n self.components = url\n elif isinstance(url, str):\n self.components = rfc3986.api.uri_reference(url)\n else:\n self.components = url.components\n\n # Handle IDNA domain names.\n if self.components.authority:\n idna_authority = self.components.authority.encode(\"idna\").decode(\"ascii\")\n if idna_authority != self.components.authority:\n self.components = self.components.copy_with(authority=idna_authority)\n\n # Normalize scheme and domain name.\n self.components = self.components.normalize()\n\n # Add any query parameters.\n if params:\n query_string = str(QueryParams(params))\n self.components = self.components.copy_with(query=query_string)\n\n # Enforce absolute URLs by default.\n if not allow_relative:\n if not self.scheme:\n raise InvalidURL(\"No scheme included in URL.\")\n if not self.host:\n raise InvalidURL(\"No host included in URL.\")\n\n @property\n def scheme(self) -> str:\n return self.components.scheme or \"\"\n\n @property\n def authority(self) -> str:\n return self.components.authority or \"\"\n\n @property\n def username(self) -> str:\n userinfo = self.components.userinfo or \"\"\n return userinfo.partition(\":\")[0]\n\n @property\n def password(self) -> str:\n userinfo = self.components.userinfo or \"\"\n return userinfo.partition(\":\")[2]\n\n @property\n def host(self) -> str:\n return self.components.host or \"\"\n\n @property\n def port(self) -> int:\n port = self.components.port\n if port is None:\n return {\"https\": 443, \"http\": 
80}[self.scheme]\n return int(port)\n\n @property\n def path(self) -> str:\n return self.components.path or \"/\"\n\n @property\n def query(self) -> str:\n return self.components.query or \"\"\n\n @property\n def full_path(self) -> str:\n path = self.path\n if self.query:\n path += \"?\" + self.query\n return path\n\n @property\n def fragment(self) -> str:\n return self.components.fragment or \"\"\n\n @property\n def is_ssl(self) -> bool:\n return self.components.scheme == \"https\"\n\n @property\n def is_absolute_url(self) -> bool:\n \"\"\"\n Return `True` for absolute URLs such as 'http://example.com/path',\n and `False` for relative URLs such as '/path'.\n \"\"\"\n # We don't use rfc3986's `is_absolute` because it treats\n # URLs with a fragment portion as not absolute.\n # What we actually care about is if the URL provides\n # a scheme and hostname to which connections should be made.\n return self.components.scheme and self.components.host\n\n @property\n def is_relative_url(self) -> bool:\n return not self.is_absolute_url\n\n @property\n def origin(self) -> \"Origin\":\n return Origin(self)\n\n def copy_with(self, **kwargs: typing.Any) -> \"URL\":\n return URL(self.components.copy_with(**kwargs))\n\n def join(self, relative_url: URLTypes) -> \"URL\":\n \"\"\"\n Return an absolute URL, using given this URL as the base.\n \"\"\"\n if self.is_relative_url:\n return URL(relative_url)\n\n # We drop any fragment portion, because RFC 3986 strictly\n # treats URLs with a fragment portion as not being absolute URLs.\n base_components = self.components.copy_with(fragment=None)\n relative_url = URL(relative_url, allow_relative=True)\n return URL(relative_url.components.resolve_with(base_components))\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, URL) and str(self) == str(other)\n\n def __str__(self) -> str:\n return self.components.unsplit()\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n url_str = str(self)\n return f\"{class_name}({url_str!r})\"\n\n\nclass Origin:\n \"\"\"\n The URL scheme and authority information, as a comparable, hashable object.\n \"\"\"\n\n def __init__(self, url: URLTypes) -> None:\n if not isinstance(url, URL):\n url = URL(url)\n self.is_ssl = url.is_ssl\n self.host = url.host\n self.port = url.port\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.is_ssl == other.is_ssl\n and self.host == other.host\n and self.port == other.port\n )\n\n def __hash__(self) -> int:\n return hash((self.is_ssl, self.host, self.port))\n\n\nclass QueryParams(typing.Mapping[str, str]):\n \"\"\"\n URL query parameters, as a multi-dict.\n \"\"\"\n\n def __init__(self, *args: QueryParamTypes, **kwargs: typing.Any) -> None:\n assert len(args) < 2, \"Too many arguments.\"\n assert not (args and kwargs), \"Cannot mix named and unnamed arguments.\"\n\n value = args[0] if args else kwargs\n\n if isinstance(value, str):\n items = parse_qsl(value)\n elif isinstance(value, QueryParams):\n items = value.multi_items()\n elif isinstance(value, list):\n items = value\n else:\n items = value.items() # type: ignore\n\n self._list = [(str(k), str(v)) for k, v in items]\n self._dict = {str(k): str(v) for k, v in items}\n\n def getlist(self, key: typing.Any) -> typing.List[str]:\n return [item_value for item_key, item_value in self._list if item_key == key]\n\n def keys(self) -> typing.KeysView:\n return self._dict.keys()\n\n def values(self) -> 
typing.ValuesView:\n return self._dict.values()\n\n def items(self) -> typing.ItemsView:\n return self._dict.items()\n\n def multi_items(self) -> typing.List[typing.Tuple[str, str]]:\n return list(self._list)\n\n def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:\n if key in self._dict:\n return self._dict[key]\n return default\n\n def __getitem__(self, key: typing.Any) -> str:\n return self._dict[key]\n\n def __contains__(self, key: typing.Any) -> bool:\n return key in self._dict\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._dict)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, self.__class__):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __str__(self) -> str:\n return urlencode(self._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n query_string = str(self)\n return f\"{class_name}({query_string!r})\"\n\n\nclass Headers(typing.MutableMapping[str, str]):\n \"\"\"\n HTTP headers, as a case-insensitive multi-dict.\n \"\"\"\n\n def __init__(self, headers: HeaderTypes = None, encoding: str = None) -> None:\n if headers is None:\n self._list = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n elif isinstance(headers, Headers):\n self._list = list(headers.raw)\n elif isinstance(headers, dict):\n self._list = [\n (normalize_header_key(k, encoding), normalize_header_value(v, encoding))\n for k, v in headers.items()\n ]\n else:\n self._list = [\n (normalize_header_key(k, encoding), normalize_header_value(v, encoding))\n for k, v in headers\n ]\n self._encoding = encoding\n\n @property\n def encoding(self) -> str:\n \"\"\"\n Header encoding is mandated as ascii, but we allow fallbacks to utf-8\n or iso-8859-1.\n \"\"\"\n if self._encoding is None:\n for encoding in [\"ascii\", \"utf-8\"]:\n for key, value in self.raw:\n try:\n key.decode(encoding)\n value.decode(encoding)\n except UnicodeDecodeError:\n break\n else:\n # The else block runs if 'break' did not occur, meaning\n # all values fitted the encoding.\n self._encoding = encoding\n break\n else:\n # The ISO-8859-1 encoding covers all 256 code points in a byte,\n # so will never raise decode errors.\n self._encoding = \"iso-8859-1\"\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: str) -> None:\n self._encoding = value\n\n @property\n def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:\n \"\"\"\n Returns a list of the raw header items, as byte pairs.\n May be mutated in-place.\n \"\"\"\n return self._list\n\n def keys(self) -> typing.List[str]: # type: ignore\n return [key.decode(self.encoding) for key, value in self._list]\n\n def values(self) -> typing.List[str]: # type: ignore\n return [value.decode(self.encoding) for key, value in self._list]\n\n def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore\n return [\n (key.decode(self.encoding), value.decode(self.encoding))\n for key, value in self._list\n ]\n\n def get(self, key: str, default: typing.Any = None) -> typing.Any:\n try:\n return self[key]\n except KeyError:\n return default\n\n def getlist(self, key: str, split_commas: bool = False) -> typing.List[str]:\n \"\"\"\n Return multiple header values.\n \"\"\"\n get_header_key = key.lower().encode(self.encoding)\n\n values = [\n item_value.decode(self.encoding)\n for item_key, item_value in self._list\n if item_key == get_header_key\n ]\n\n if not split_commas:\n return values\n\n split_values = 
[]\n for value in values:\n split_values.extend([item.strip() for item in value.split(\",\")])\n return split_values\n\n def __getitem__(self, key: str) -> str:\n \"\"\"\n Return a single header value.\n\n If there are multiple headers with the same key, then we concatenate\n them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2\n \"\"\"\n normalized_key = key.lower().encode(self.encoding)\n\n items = []\n for header_key, header_value in self._list:\n if header_key == normalized_key:\n items.append(header_value.decode(self.encoding))\n\n if items:\n return \", \".join(items)\n\n raise KeyError(key)\n\n def __setitem__(self, key: str, value: str) -> None:\n \"\"\"\n Set the header `key` to `value`, removing any duplicate entries.\n Retains insertion order.\n \"\"\"\n set_key = key.lower().encode(self.encoding)\n set_value = value.encode(self.encoding)\n\n found_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == set_key:\n found_indexes.append(idx)\n\n for idx in reversed(found_indexes[1:]):\n del self._list[idx]\n\n if found_indexes:\n idx = found_indexes[0]\n self._list[idx] = (set_key, set_value)\n else:\n self._list.append((set_key, set_value))\n\n def __delitem__(self, key: str) -> None:\n \"\"\"\n Remove the header `key`.\n \"\"\"\n del_key = key.lower().encode(self.encoding)\n\n pop_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == del_key:\n pop_indexes.append(idx)\n\n for idx in reversed(pop_indexes):\n del self._list[idx]\n\n def __contains__(self, key: typing.Any) -> bool:\n get_header_key = key.lower().encode(self.encoding)\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return True\n return False\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._list)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, Headers):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n\n encoding_str = \"\"\n if self.encoding != \"ascii\":\n encoding_str = f\", encoding={self.encoding!r}\"\n\n as_dict = dict(self.items())\n if len(as_dict) == len(self):\n return f\"{class_name}({as_dict!r}{encoding_str})\"\n as_list = self.items()\n return f\"{class_name}({as_list!r}{encoding_str})\"\n\n\nclass BaseRequest:\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n ):\n self.method = method.upper()\n self.url = URL(url, params=params)\n self.headers = Headers(headers)\n if cookies:\n self._cookies = Cookies(cookies)\n self._cookies.set_cookie_header(self)\n\n def encode_data(\n self, data: dict = None, files: RequestFiles = None, json: typing.Any = None\n ) -> typing.Tuple[bytes, str]:\n if json is not None:\n content = jsonlib.dumps(json).encode(\"utf-8\")\n content_type = \"application/json\"\n elif files is not None:\n content, content_type = multipart_encode(data or {}, files)\n elif data is not None:\n content = urlencode(data, doseq=True).encode(\"utf-8\")\n content_type = \"application/x-www-form-urlencoded\"\n else:\n content = b\"\"\n content_type = \"\"\n return content, content_type\n\n def prepare(self) -> None:\n content = getattr(self, \"content\", None) # type: bytes\n is_streaming = getattr(self, \"is_streaming\", False)\n\n auto_headers = [] # type: 
typing.List[typing.Tuple[bytes, bytes]]\n\n has_host = \"host\" in self.headers\n has_user_agent = \"user-agent\" in self.headers\n has_accept = \"accept\" in self.headers\n has_content_length = (\n \"content-length\" in self.headers or \"transfer-encoding\" in self.headers\n )\n has_accept_encoding = \"accept-encoding\" in self.headers\n has_connection = \"connection\" in self.headers\n\n if not has_host:\n auto_headers.append((b\"host\", self.url.authority.encode(\"ascii\")))\n if not has_user_agent:\n auto_headers.append((b\"user-agent\", USER_AGENT.encode(\"ascii\")))\n if not has_accept:\n auto_headers.append((b\"accept\", b\"*/*\"))\n if not has_content_length:\n if is_streaming:\n auto_headers.append((b\"transfer-encoding\", b\"chunked\"))\n elif content:\n content_length = str(len(content)).encode()\n auto_headers.append((b\"content-length\", content_length))\n if not has_accept_encoding:\n auto_headers.append((b\"accept-encoding\", ACCEPT_ENCODING.encode()))\n if not has_connection:\n auto_headers.append((b\"connection\", b\"keep-alive\"))\n\n for item in reversed(auto_headers):\n self.headers.raw.insert(0, item)\n\n @property\n def cookies(self) -> \"Cookies\":\n if not hasattr(self, \"_cookies\"):\n self._cookies = Cookies()\n return self._cookies\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n url = str(self.url)\n return f\"<{class_name}({self.method!r}, {url!r})>\"\n\n\nclass AsyncRequest(BaseRequest):\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n data: AsyncRequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n ):\n super().__init__(\n method=method, url=url, params=params, headers=headers, cookies=cookies\n )\n\n if data is None or isinstance(data, dict):\n content, content_type = self.encode_data(data, files, json)\n self.is_streaming = False\n self.content = content\n if content_type:\n self.headers[\"Content-Type\"] = content_type\n elif isinstance(data, (str, bytes)):\n data = data.encode(\"utf-8\") if isinstance(data, str) else data\n self.is_streaming = False\n self.content = data\n else:\n assert hasattr(data, \"__aiter__\")\n self.is_streaming = True\n self.content_aiter = data\n\n self.prepare()\n\n async def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"content\"):\n self.content = b\"\".join([part async for part in self.stream()])\n return self.content\n\n async def stream(self) -> typing.AsyncIterator[bytes]:\n if self.is_streaming:\n async for part in self.content_aiter:\n yield part\n elif self.content:\n yield self.content\n\n\nclass Request(BaseRequest):\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n data: RequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n ):\n super().__init__(\n method=method, url=url, params=params, headers=headers, cookies=cookies\n )\n\n if data is None or isinstance(data, dict):\n content, content_type = self.encode_data(data, files, json)\n self.is_streaming = False\n self.content = content\n if content_type:\n self.headers[\"Content-Type\"] = content_type\n elif isinstance(data, (str, bytes)):\n data = data.encode(\"utf-8\") if isinstance(data, str) else data\n self.is_streaming = False\n self.content = data\n else:\n assert hasattr(data, \"__iter__\")\n 
self.is_streaming = True\n self.content_iter = data\n\n self.prepare()\n\n def read(self) -> bytes:\n if not hasattr(self, \"content\"):\n self.content = b\"\".join([part for part in self.stream()])\n return self.content\n\n def stream(self) -> typing.Iterator[bytes]:\n if self.is_streaming:\n for part in self.content_iter:\n yield part\n elif self.content:\n yield self.content\n\n\nclass BaseResponse:\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n request: BaseRequest = None,\n on_close: typing.Callable = None,\n ):\n self.status_code = status_code\n self.protocol = protocol\n self.headers = Headers(headers)\n\n self.request = request\n self.on_close = on_close\n self.next = None # typing.Optional[typing.Callable]\n\n @property\n def reason_phrase(self) -> str:\n return StatusCode.get_reason_phrase(self.status_code)\n\n @property\n def url(self) -> typing.Optional[URL]:\n \"\"\"\n Returns the URL for which the request was made.\n\n Requires that `request` was provided when instantiating the response.\n \"\"\"\n return None if self.request is None else self.request.url\n\n @property\n def content(self) -> bytes:\n if not hasattr(self, \"_content\"):\n if hasattr(self, \"_raw_content\"):\n raw_content = getattr(self, \"_raw_content\") # type: bytes\n content = self.decoder.decode(raw_content)\n content += self.decoder.flush()\n self._content = content\n else:\n raise ResponseNotRead()\n return self._content\n\n @property\n def text(self) -> str:\n if not hasattr(self, \"_text\"):\n content = self.content\n if not content:\n self._text = \"\"\n else:\n encoding = self.encoding\n self._text = content.decode(encoding, errors=\"replace\")\n return self._text\n\n @property\n def encoding(self) -> str:\n if not hasattr(self, \"_encoding\"):\n encoding = self.charset_encoding\n if encoding is None or not is_known_encoding(encoding):\n encoding = self.apparent_encoding\n if encoding is None or not is_known_encoding(encoding):\n encoding = \"utf-8\"\n self._encoding = encoding\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: str) -> None:\n self._encoding = value\n\n @property\n def charset_encoding(self) -> typing.Optional[str]:\n \"\"\"\n Return the encoding, as specified by the Content-Type header.\n \"\"\"\n content_type = self.headers.get(\"Content-Type\")\n if content_type is None:\n return None\n\n parsed = cgi.parse_header(content_type)\n media_type, params = parsed[0], parsed[-1]\n if \"charset\" in params:\n return params[\"charset\"].strip(\"'\\\"\")\n\n # RFC 2616 specifies that 'iso-8859-1' should be used as the default\n # for 'text/*' media types, if no charset is provided.\n # See: https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n if media_type.startswith(\"text/\"):\n return \"iso-8859-1\"\n\n return None\n\n @property\n def apparent_encoding(self) -> typing.Optional[str]:\n \"\"\"\n Return the encoding, as it appears to autodetection.\n \"\"\"\n return chardet.detect(self.content)[\"encoding\"]\n\n @property\n def decoder(self) -> Decoder:\n \"\"\"\n Returns a decoder instance which can be used to decode the raw byte\n content, depending on the Content-Encoding used in the response.\n \"\"\"\n if not hasattr(self, \"_decoder\"):\n decoders = [] # type: typing.List[Decoder]\n values = self.headers.getlist(\"content-encoding\", split_commas=True)\n for value in values:\n value = value.strip().lower()\n decoder_cls = SUPPORTED_DECODERS[value]\n decoders.append(decoder_cls())\n\n if 
len(decoders) == 1:\n self._decoder = decoders[0]\n elif len(decoders) > 1:\n self._decoder = MultiDecoder(decoders)\n else:\n self._decoder = IdentityDecoder()\n\n return self._decoder\n\n @property\n def is_redirect(self) -> bool:\n return StatusCode.is_redirect(self.status_code) and \"location\" in self.headers\n\n def raise_for_status(self) -> None:\n \"\"\"\n Raise the `HttpError` if one occurred.\n \"\"\"\n message = (\n \"{0.status_code} {error_type}: {0.reason_phrase} for url: {0.url}\\n\"\n \"For more information check: https://httpstatuses.com/{0.status_code}\"\n )\n\n if StatusCode.is_client_error(self.status_code):\n message = message.format(self, error_type=\"Client Error\")\n elif StatusCode.is_server_error(self.status_code):\n message = message.format(self, error_type=\"Server Error\")\n else:\n message = \"\"\n\n if message:\n raise HttpError(message)\n\n def json(self, **kwargs: typing.Any) -> typing.Union[dict, list]:\n if self.charset_encoding is None and self.content and len(self.content) > 3:\n encoding = guess_json_utf(self.content)\n if encoding is not None:\n try:\n return jsonlib.loads(self.content.decode(encoding), **kwargs)\n except UnicodeDecodeError:\n pass\n return jsonlib.loads(self.text, **kwargs)\n\n @property\n def cookies(self) -> \"Cookies\":\n if not hasattr(self, \"_cookies\"):\n assert self.request is not None\n self._cookies = Cookies()\n self._cookies.extract_cookies(self)\n return self._cookies\n\n def __repr__(self) -> str:\n return f\"<Response [{self.status_code} {self.reason_phrase}]>\"\n\n\nclass AsyncResponse(BaseResponse):\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n content: AsyncResponseContent = None,\n on_close: typing.Callable = None,\n request: AsyncRequest = None,\n history: typing.List[\"BaseResponse\"] = None,\n ):\n super().__init__(\n status_code=status_code,\n protocol=protocol,\n headers=headers,\n request=request,\n on_close=on_close,\n )\n\n self.history = [] if history is None else list(history)\n\n if content is None or isinstance(content, bytes):\n self.is_closed = True\n self.is_stream_consumed = True\n self._raw_content = content or b\"\"\n else:\n self.is_closed = False\n self.is_stream_consumed = False\n self._raw_stream = content\n\n async def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"_content\"):\n self._content = b\"\".join([part async for part in self.stream()])\n return self._content\n\n async def stream(self) -> typing.AsyncIterator[bytes]:\n \"\"\"\n A byte-iterator over the decoded response content.\n This allows us to handle gzip, deflate, and brotli encoded responses.\n \"\"\"\n if hasattr(self, \"_content\"):\n yield self._content\n else:\n async for chunk in self.raw():\n yield self.decoder.decode(chunk)\n yield self.decoder.flush()\n\n async def raw(self) -> typing.AsyncIterator[bytes]:\n \"\"\"\n A byte-iterator over the raw response content.\n \"\"\"\n if hasattr(self, \"_raw_content\"):\n yield self._raw_content\n else:\n if self.is_stream_consumed:\n raise StreamConsumed()\n if self.is_closed:\n raise ResponseClosed()\n\n self.is_stream_consumed = True\n async for part in self._raw_stream:\n yield part\n await self.close()\n\n async def close(self) -> None:\n \"\"\"\n Close the response and release the connection.\n Automatically called if the response body is read to completion.\n \"\"\"\n if not self.is_closed:\n self.is_closed = True\n if self.on_close is not None:\n await 
self.on_close()\n\n\nclass Response(BaseResponse):\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n content: ResponseContent = None,\n on_close: typing.Callable = None,\n request: Request = None,\n history: typing.List[\"BaseResponse\"] = None,\n ):\n super().__init__(\n status_code=status_code,\n protocol=protocol,\n headers=headers,\n request=request,\n on_close=on_close,\n )\n\n self.history = [] if history is None else list(history)\n\n if content is None or isinstance(content, bytes):\n self.is_closed = True\n self.is_stream_consumed = True\n self._raw_content = content or b\"\"\n else:\n self.is_closed = False\n self.is_stream_consumed = False\n self._raw_stream = content\n\n def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"_content\"):\n self._content = b\"\".join([part for part in self.stream()])\n return self._content\n\n def stream(self) -> typing.Iterator[bytes]:\n \"\"\"\n A byte-iterator over the decoded response content.\n This allows us to handle gzip, deflate, and brotli encoded responses.\n \"\"\"\n if hasattr(self, \"_content\"):\n yield self._content\n else:\n for chunk in self.raw():\n yield self.decoder.decode(chunk)\n yield self.decoder.flush()\n\n def raw(self) -> typing.Iterator[bytes]:\n \"\"\"\n A byte-iterator over the raw response content.\n \"\"\"\n if hasattr(self, \"_raw_content\"):\n yield self._raw_content\n else:\n if self.is_stream_consumed:\n raise StreamConsumed()\n if self.is_closed:\n raise ResponseClosed()\n\n self.is_stream_consumed = True\n for part in self._raw_stream:\n yield part\n self.close()\n\n def close(self) -> None:\n \"\"\"\n Close the response and release the connection.\n Automatically called if the response body is read to completion.\n \"\"\"\n if not self.is_closed:\n self.is_closed = True\n if self.on_close is not None:\n self.on_close()\n\n\nclass Cookies(MutableMapping):\n \"\"\"\n HTTP Cookies, as a mutable mapping.\n \"\"\"\n\n def __init__(self, cookies: CookieTypes = None) -> None:\n if cookies is None or isinstance(cookies, dict):\n self.jar = CookieJar()\n if isinstance(cookies, dict):\n for key, value in cookies.items():\n self.set(key, value)\n elif isinstance(cookies, Cookies):\n self.jar = CookieJar()\n for cookie in cookies.jar:\n self.jar.set_cookie(cookie)\n else:\n self.jar = cookies\n\n def extract_cookies(self, response: BaseResponse) -> None:\n \"\"\"\n Loads any cookies based on the response `Set-Cookie` headers.\n \"\"\"\n assert response.request is not None\n urlib_response = self._CookieCompatResponse(response)\n urllib_request = self._CookieCompatRequest(response.request)\n\n self.jar.extract_cookies(urlib_response, urllib_request) # type: ignore\n\n def set_cookie_header(self, request: BaseRequest) -> None:\n \"\"\"\n Sets an appropriate 'Cookie:' HTTP header on the `Request`.\n \"\"\"\n urllib_request = self._CookieCompatRequest(request)\n self.jar.add_cookie_header(urllib_request)\n\n def set(self, name: str, value: str, domain: str = \"\", path: str = \"/\") -> None:\n \"\"\"\n Set a cookie value by name. 
May optionally include domain and path.\n \"\"\"\n kwargs = dict(\n version=0,\n name=name,\n value=value,\n port=None,\n port_specified=False,\n domain=domain,\n domain_specified=bool(domain),\n domain_initial_dot=domain.startswith(\".\"),\n path=path,\n path_specified=bool(path),\n secure=False,\n expires=None,\n discard=True,\n comment=None,\n comment_url=None,\n rest={\"HttpOnly\": None},\n rfc2109=False,\n )\n cookie = Cookie(**kwargs) # type: ignore\n self.jar.set_cookie(cookie)\n\n def get( # type: ignore\n self, name: str, default: str = None, domain: str = None, path: str = None\n ) -> typing.Optional[str]:\n \"\"\"\n Get a cookie by name. May optionally include domain and path\n in order to specify exactly which cookie to retrieve.\n \"\"\"\n value = None\n for cookie in self.jar:\n if cookie.name == name:\n if domain is None or cookie.domain == domain: # type: ignore\n if path is None or cookie.path == path:\n if value is not None:\n message = f\"Multiple cookies exist with name={name}\"\n raise CookieConflict(message)\n value = cookie.value\n\n if value is None:\n return default\n return value\n\n def delete(self, name: str, domain: str = None, path: str = None) -> None:\n \"\"\"\n Delete a cookie by name. May optionally include domain and path\n in order to specify exactly which cookie to delete.\n \"\"\"\n if domain is not None and path is not None:\n return self.jar.clear(domain, path, name)\n\n remove = []\n for cookie in self.jar:\n if cookie.name == name:\n if domain is None or cookie.domain == domain: # type: ignore\n if path is None or cookie.path == path:\n remove.append(cookie)\n\n for cookie in remove:\n self.jar.clear(cookie.domain, cookie.path, cookie.name) # type: ignore\n\n def clear(self, domain: str = None, path: str = None) -> None:\n \"\"\"\n Delete all cookies. 
Optionally include a domain and path in\n order to only delete a subset of all the cookies.\n \"\"\"\n args = []\n if domain is not None:\n args.append(domain)\n if path is not None:\n assert domain is not None\n args.append(path)\n self.jar.clear(*args)\n\n def update(self, cookies: CookieTypes = None) -> None: # type: ignore\n cookies = Cookies(cookies)\n for cookie in cookies.jar:\n self.jar.set_cookie(cookie)\n\n def __setitem__(self, name: str, value: str) -> None:\n return self.set(name, value)\n\n def __getitem__(self, name: str) -> str:\n value = self.get(name)\n if value is None:\n raise KeyError(name)\n return value\n\n def __delitem__(self, name: str) -> None:\n return self.delete(name)\n\n def __len__(self) -> int:\n return len(self.jar)\n\n def __iter__(self) -> typing.Iterator[str]:\n return (cookie.name for cookie in self.jar)\n\n def __bool__(self) -> bool:\n for cookie in self.jar:\n return True\n return False\n\n class _CookieCompatRequest(urllib.request.Request):\n \"\"\"\n Wraps a `Request` instance up in a compatability interface suitable\n for use with `CookieJar` operations.\n \"\"\"\n\n def __init__(self, request: BaseRequest) -> None:\n super().__init__(\n url=str(request.url),\n headers=dict(request.headers),\n method=request.method,\n )\n self.request = request\n\n def add_unredirected_header(self, key: str, value: str) -> None:\n super().add_unredirected_header(key, value)\n self.request.headers[key] = value\n\n class _CookieCompatResponse:\n \"\"\"\n Wraps a `Request` instance up in a compatability interface suitable\n for use with `CookieJar` operations.\n \"\"\"\n\n def __init__(self, response: BaseResponse):\n self.response = response\n\n def info(self) -> email.message.Message:\n info = email.message.Message()\n for key, value in self.response.headers.items():\n info[key] = value\n return info\n",
"path": "httpx/models.py"
}
] | [
{
"content": "import cgi\nimport email.message\nimport json as jsonlib\nimport typing\nimport urllib.request\nfrom collections.abc import MutableMapping\nfrom http.cookiejar import Cookie, CookieJar\nfrom urllib.parse import parse_qsl, urlencode\n\nimport chardet\nimport rfc3986\n\nfrom .config import USER_AGENT\nfrom .decoders import (\n ACCEPT_ENCODING,\n SUPPORTED_DECODERS,\n Decoder,\n IdentityDecoder,\n MultiDecoder,\n)\nfrom .exceptions import (\n CookieConflict,\n HttpError,\n InvalidURL,\n ResponseClosed,\n ResponseNotRead,\n StreamConsumed,\n)\nfrom .multipart import multipart_encode\nfrom .status_codes import StatusCode\nfrom .utils import (\n guess_json_utf,\n is_known_encoding,\n normalize_header_key,\n normalize_header_value,\n)\n\nURLTypes = typing.Union[\"URL\", str]\n\nQueryParamTypes = typing.Union[\n \"QueryParams\",\n typing.Mapping[str, str],\n typing.List[typing.Tuple[typing.Any, typing.Any]],\n str,\n]\n\nHeaderTypes = typing.Union[\n \"Headers\",\n typing.Dict[typing.AnyStr, typing.AnyStr],\n typing.List[typing.Tuple[typing.AnyStr, typing.AnyStr]],\n]\n\nCookieTypes = typing.Union[\"Cookies\", CookieJar, typing.Dict[str, str]]\n\nAuthTypes = typing.Union[\n typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],\n typing.Callable[[\"AsyncRequest\"], \"AsyncRequest\"],\n]\n\nAsyncRequestData = typing.Union[dict, str, bytes, typing.AsyncIterator[bytes]]\n\nRequestData = typing.Union[dict, str, bytes, typing.Iterator[bytes]]\n\nRequestFiles = typing.Dict[\n str,\n typing.Union[\n typing.IO[typing.AnyStr], # file\n typing.Tuple[str, typing.IO[typing.AnyStr]], # (filename, file)\n typing.Tuple[\n str, typing.IO[typing.AnyStr], str\n ], # (filename, file, content_type)\n ],\n]\n\nAsyncResponseContent = typing.Union[bytes, typing.AsyncIterator[bytes]]\n\nResponseContent = typing.Union[bytes, typing.Iterator[bytes]]\n\n\nclass URL:\n def __init__(\n self,\n url: URLTypes,\n allow_relative: bool = False,\n params: QueryParamTypes = None,\n ) -> None:\n if isinstance(url, rfc3986.uri.URIReference):\n self.components = url\n elif isinstance(url, str):\n self.components = rfc3986.api.uri_reference(url)\n else:\n self.components = url.components\n\n # Handle IDNA domain names.\n if self.components.authority:\n idna_authority = self.components.authority.encode(\"idna\").decode(\"ascii\")\n if idna_authority != self.components.authority:\n self.components = self.components.copy_with(authority=idna_authority)\n\n # Normalize scheme and domain name.\n self.components = self.components.normalize()\n\n # Add any query parameters.\n if params:\n query_string = str(QueryParams(params))\n self.components = self.components.copy_with(query=query_string)\n\n # Enforce absolute URLs by default.\n if not allow_relative:\n if not self.scheme:\n raise InvalidURL(\"No scheme included in URL.\")\n if not self.host:\n raise InvalidURL(\"No host included in URL.\")\n\n @property\n def scheme(self) -> str:\n return self.components.scheme or \"\"\n\n @property\n def authority(self) -> str:\n return self.components.authority or \"\"\n\n @property\n def username(self) -> str:\n userinfo = self.components.userinfo or \"\"\n return userinfo.partition(\":\")[0]\n\n @property\n def password(self) -> str:\n userinfo = self.components.userinfo or \"\"\n return userinfo.partition(\":\")[2]\n\n @property\n def host(self) -> str:\n return self.components.host or \"\"\n\n @property\n def port(self) -> int:\n port = self.components.port\n if port is None:\n return {\"https\": 443, \"http\": 
80}[self.scheme]\n return int(port)\n\n @property\n def path(self) -> str:\n return self.components.path or \"/\"\n\n @property\n def query(self) -> str:\n return self.components.query or \"\"\n\n @property\n def full_path(self) -> str:\n path = self.path\n if self.query:\n path += \"?\" + self.query\n return path\n\n @property\n def fragment(self) -> str:\n return self.components.fragment or \"\"\n\n @property\n def is_ssl(self) -> bool:\n return self.components.scheme == \"https\"\n\n @property\n def is_absolute_url(self) -> bool:\n \"\"\"\n Return `True` for absolute URLs such as 'http://example.com/path',\n and `False` for relative URLs such as '/path'.\n \"\"\"\n # We don't use rfc3986's `is_absolute` because it treats\n # URLs with a fragment portion as not absolute.\n # What we actually care about is if the URL provides\n # a scheme and hostname to which connections should be made.\n return self.components.scheme and self.components.host\n\n @property\n def is_relative_url(self) -> bool:\n return not self.is_absolute_url\n\n @property\n def origin(self) -> \"Origin\":\n return Origin(self)\n\n def copy_with(self, **kwargs: typing.Any) -> \"URL\":\n return URL(self.components.copy_with(**kwargs))\n\n def join(self, relative_url: URLTypes) -> \"URL\":\n \"\"\"\n Return an absolute URL, using given this URL as the base.\n \"\"\"\n if self.is_relative_url:\n return URL(relative_url)\n\n # We drop any fragment portion, because RFC 3986 strictly\n # treats URLs with a fragment portion as not being absolute URLs.\n base_components = self.components.copy_with(fragment=None)\n relative_url = URL(relative_url, allow_relative=True)\n return URL(relative_url.components.resolve_with(base_components))\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, (URL, str)) and str(self) == str(other)\n\n def __str__(self) -> str:\n return self.components.unsplit()\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n url_str = str(self)\n return f\"{class_name}({url_str!r})\"\n\n\nclass Origin:\n \"\"\"\n The URL scheme and authority information, as a comparable, hashable object.\n \"\"\"\n\n def __init__(self, url: URLTypes) -> None:\n if not isinstance(url, URL):\n url = URL(url)\n self.is_ssl = url.is_ssl\n self.host = url.host\n self.port = url.port\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.is_ssl == other.is_ssl\n and self.host == other.host\n and self.port == other.port\n )\n\n def __hash__(self) -> int:\n return hash((self.is_ssl, self.host, self.port))\n\n\nclass QueryParams(typing.Mapping[str, str]):\n \"\"\"\n URL query parameters, as a multi-dict.\n \"\"\"\n\n def __init__(self, *args: QueryParamTypes, **kwargs: typing.Any) -> None:\n assert len(args) < 2, \"Too many arguments.\"\n assert not (args and kwargs), \"Cannot mix named and unnamed arguments.\"\n\n value = args[0] if args else kwargs\n\n if isinstance(value, str):\n items = parse_qsl(value)\n elif isinstance(value, QueryParams):\n items = value.multi_items()\n elif isinstance(value, list):\n items = value\n else:\n items = value.items() # type: ignore\n\n self._list = [(str(k), str(v)) for k, v in items]\n self._dict = {str(k): str(v) for k, v in items}\n\n def getlist(self, key: typing.Any) -> typing.List[str]:\n return [item_value for item_key, item_value in self._list if item_key == key]\n\n def keys(self) -> typing.KeysView:\n return self._dict.keys()\n\n def 
values(self) -> typing.ValuesView:\n return self._dict.values()\n\n def items(self) -> typing.ItemsView:\n return self._dict.items()\n\n def multi_items(self) -> typing.List[typing.Tuple[str, str]]:\n return list(self._list)\n\n def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:\n if key in self._dict:\n return self._dict[key]\n return default\n\n def __getitem__(self, key: typing.Any) -> str:\n return self._dict[key]\n\n def __contains__(self, key: typing.Any) -> bool:\n return key in self._dict\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._dict)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, self.__class__):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __str__(self) -> str:\n return urlencode(self._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n query_string = str(self)\n return f\"{class_name}({query_string!r})\"\n\n\nclass Headers(typing.MutableMapping[str, str]):\n \"\"\"\n HTTP headers, as a case-insensitive multi-dict.\n \"\"\"\n\n def __init__(self, headers: HeaderTypes = None, encoding: str = None) -> None:\n if headers is None:\n self._list = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n elif isinstance(headers, Headers):\n self._list = list(headers.raw)\n elif isinstance(headers, dict):\n self._list = [\n (normalize_header_key(k, encoding), normalize_header_value(v, encoding))\n for k, v in headers.items()\n ]\n else:\n self._list = [\n (normalize_header_key(k, encoding), normalize_header_value(v, encoding))\n for k, v in headers\n ]\n self._encoding = encoding\n\n @property\n def encoding(self) -> str:\n \"\"\"\n Header encoding is mandated as ascii, but we allow fallbacks to utf-8\n or iso-8859-1.\n \"\"\"\n if self._encoding is None:\n for encoding in [\"ascii\", \"utf-8\"]:\n for key, value in self.raw:\n try:\n key.decode(encoding)\n value.decode(encoding)\n except UnicodeDecodeError:\n break\n else:\n # The else block runs if 'break' did not occur, meaning\n # all values fitted the encoding.\n self._encoding = encoding\n break\n else:\n # The ISO-8859-1 encoding covers all 256 code points in a byte,\n # so will never raise decode errors.\n self._encoding = \"iso-8859-1\"\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: str) -> None:\n self._encoding = value\n\n @property\n def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:\n \"\"\"\n Returns a list of the raw header items, as byte pairs.\n May be mutated in-place.\n \"\"\"\n return self._list\n\n def keys(self) -> typing.List[str]: # type: ignore\n return [key.decode(self.encoding) for key, value in self._list]\n\n def values(self) -> typing.List[str]: # type: ignore\n return [value.decode(self.encoding) for key, value in self._list]\n\n def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore\n return [\n (key.decode(self.encoding), value.decode(self.encoding))\n for key, value in self._list\n ]\n\n def get(self, key: str, default: typing.Any = None) -> typing.Any:\n try:\n return self[key]\n except KeyError:\n return default\n\n def getlist(self, key: str, split_commas: bool = False) -> typing.List[str]:\n \"\"\"\n Return multiple header values.\n \"\"\"\n get_header_key = key.lower().encode(self.encoding)\n\n values = [\n item_value.decode(self.encoding)\n for item_key, item_value in self._list\n if item_key == get_header_key\n ]\n\n if not split_commas:\n return values\n\n 
split_values = []\n for value in values:\n split_values.extend([item.strip() for item in value.split(\",\")])\n return split_values\n\n def __getitem__(self, key: str) -> str:\n \"\"\"\n Return a single header value.\n\n If there are multiple headers with the same key, then we concatenate\n them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2\n \"\"\"\n normalized_key = key.lower().encode(self.encoding)\n\n items = []\n for header_key, header_value in self._list:\n if header_key == normalized_key:\n items.append(header_value.decode(self.encoding))\n\n if items:\n return \", \".join(items)\n\n raise KeyError(key)\n\n def __setitem__(self, key: str, value: str) -> None:\n \"\"\"\n Set the header `key` to `value`, removing any duplicate entries.\n Retains insertion order.\n \"\"\"\n set_key = key.lower().encode(self.encoding)\n set_value = value.encode(self.encoding)\n\n found_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == set_key:\n found_indexes.append(idx)\n\n for idx in reversed(found_indexes[1:]):\n del self._list[idx]\n\n if found_indexes:\n idx = found_indexes[0]\n self._list[idx] = (set_key, set_value)\n else:\n self._list.append((set_key, set_value))\n\n def __delitem__(self, key: str) -> None:\n \"\"\"\n Remove the header `key`.\n \"\"\"\n del_key = key.lower().encode(self.encoding)\n\n pop_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == del_key:\n pop_indexes.append(idx)\n\n for idx in reversed(pop_indexes):\n del self._list[idx]\n\n def __contains__(self, key: typing.Any) -> bool:\n get_header_key = key.lower().encode(self.encoding)\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return True\n return False\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._list)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, Headers):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n\n encoding_str = \"\"\n if self.encoding != \"ascii\":\n encoding_str = f\", encoding={self.encoding!r}\"\n\n as_dict = dict(self.items())\n if len(as_dict) == len(self):\n return f\"{class_name}({as_dict!r}{encoding_str})\"\n as_list = self.items()\n return f\"{class_name}({as_list!r}{encoding_str})\"\n\n\nclass BaseRequest:\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n ):\n self.method = method.upper()\n self.url = URL(url, params=params)\n self.headers = Headers(headers)\n if cookies:\n self._cookies = Cookies(cookies)\n self._cookies.set_cookie_header(self)\n\n def encode_data(\n self, data: dict = None, files: RequestFiles = None, json: typing.Any = None\n ) -> typing.Tuple[bytes, str]:\n if json is not None:\n content = jsonlib.dumps(json).encode(\"utf-8\")\n content_type = \"application/json\"\n elif files is not None:\n content, content_type = multipart_encode(data or {}, files)\n elif data is not None:\n content = urlencode(data, doseq=True).encode(\"utf-8\")\n content_type = \"application/x-www-form-urlencoded\"\n else:\n content = b\"\"\n content_type = \"\"\n return content, content_type\n\n def prepare(self) -> None:\n content = getattr(self, \"content\", None) # type: bytes\n is_streaming = getattr(self, \"is_streaming\", False)\n\n auto_headers = 
[] # type: typing.List[typing.Tuple[bytes, bytes]]\n\n has_host = \"host\" in self.headers\n has_user_agent = \"user-agent\" in self.headers\n has_accept = \"accept\" in self.headers\n has_content_length = (\n \"content-length\" in self.headers or \"transfer-encoding\" in self.headers\n )\n has_accept_encoding = \"accept-encoding\" in self.headers\n has_connection = \"connection\" in self.headers\n\n if not has_host:\n auto_headers.append((b\"host\", self.url.authority.encode(\"ascii\")))\n if not has_user_agent:\n auto_headers.append((b\"user-agent\", USER_AGENT.encode(\"ascii\")))\n if not has_accept:\n auto_headers.append((b\"accept\", b\"*/*\"))\n if not has_content_length:\n if is_streaming:\n auto_headers.append((b\"transfer-encoding\", b\"chunked\"))\n elif content:\n content_length = str(len(content)).encode()\n auto_headers.append((b\"content-length\", content_length))\n if not has_accept_encoding:\n auto_headers.append((b\"accept-encoding\", ACCEPT_ENCODING.encode()))\n if not has_connection:\n auto_headers.append((b\"connection\", b\"keep-alive\"))\n\n for item in reversed(auto_headers):\n self.headers.raw.insert(0, item)\n\n @property\n def cookies(self) -> \"Cookies\":\n if not hasattr(self, \"_cookies\"):\n self._cookies = Cookies()\n return self._cookies\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n url = str(self.url)\n return f\"<{class_name}({self.method!r}, {url!r})>\"\n\n\nclass AsyncRequest(BaseRequest):\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n data: AsyncRequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n ):\n super().__init__(\n method=method, url=url, params=params, headers=headers, cookies=cookies\n )\n\n if data is None or isinstance(data, dict):\n content, content_type = self.encode_data(data, files, json)\n self.is_streaming = False\n self.content = content\n if content_type:\n self.headers[\"Content-Type\"] = content_type\n elif isinstance(data, (str, bytes)):\n data = data.encode(\"utf-8\") if isinstance(data, str) else data\n self.is_streaming = False\n self.content = data\n else:\n assert hasattr(data, \"__aiter__\")\n self.is_streaming = True\n self.content_aiter = data\n\n self.prepare()\n\n async def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"content\"):\n self.content = b\"\".join([part async for part in self.stream()])\n return self.content\n\n async def stream(self) -> typing.AsyncIterator[bytes]:\n if self.is_streaming:\n async for part in self.content_aiter:\n yield part\n elif self.content:\n yield self.content\n\n\nclass Request(BaseRequest):\n def __init__(\n self,\n method: str,\n url: typing.Union[str, URL],\n *,\n params: QueryParamTypes = None,\n headers: HeaderTypes = None,\n cookies: CookieTypes = None,\n data: RequestData = None,\n files: RequestFiles = None,\n json: typing.Any = None,\n ):\n super().__init__(\n method=method, url=url, params=params, headers=headers, cookies=cookies\n )\n\n if data is None or isinstance(data, dict):\n content, content_type = self.encode_data(data, files, json)\n self.is_streaming = False\n self.content = content\n if content_type:\n self.headers[\"Content-Type\"] = content_type\n elif isinstance(data, (str, bytes)):\n data = data.encode(\"utf-8\") if isinstance(data, str) else data\n self.is_streaming = False\n self.content = data\n else:\n assert hasattr(data, 
\"__iter__\")\n self.is_streaming = True\n self.content_iter = data\n\n self.prepare()\n\n def read(self) -> bytes:\n if not hasattr(self, \"content\"):\n self.content = b\"\".join([part for part in self.stream()])\n return self.content\n\n def stream(self) -> typing.Iterator[bytes]:\n if self.is_streaming:\n for part in self.content_iter:\n yield part\n elif self.content:\n yield self.content\n\n\nclass BaseResponse:\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n request: BaseRequest = None,\n on_close: typing.Callable = None,\n ):\n self.status_code = status_code\n self.protocol = protocol\n self.headers = Headers(headers)\n\n self.request = request\n self.on_close = on_close\n self.next = None # typing.Optional[typing.Callable]\n\n @property\n def reason_phrase(self) -> str:\n return StatusCode.get_reason_phrase(self.status_code)\n\n @property\n def url(self) -> typing.Optional[URL]:\n \"\"\"\n Returns the URL for which the request was made.\n\n Requires that `request` was provided when instantiating the response.\n \"\"\"\n return None if self.request is None else self.request.url\n\n @property\n def content(self) -> bytes:\n if not hasattr(self, \"_content\"):\n if hasattr(self, \"_raw_content\"):\n raw_content = getattr(self, \"_raw_content\") # type: bytes\n content = self.decoder.decode(raw_content)\n content += self.decoder.flush()\n self._content = content\n else:\n raise ResponseNotRead()\n return self._content\n\n @property\n def text(self) -> str:\n if not hasattr(self, \"_text\"):\n content = self.content\n if not content:\n self._text = \"\"\n else:\n encoding = self.encoding\n self._text = content.decode(encoding, errors=\"replace\")\n return self._text\n\n @property\n def encoding(self) -> str:\n if not hasattr(self, \"_encoding\"):\n encoding = self.charset_encoding\n if encoding is None or not is_known_encoding(encoding):\n encoding = self.apparent_encoding\n if encoding is None or not is_known_encoding(encoding):\n encoding = \"utf-8\"\n self._encoding = encoding\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: str) -> None:\n self._encoding = value\n\n @property\n def charset_encoding(self) -> typing.Optional[str]:\n \"\"\"\n Return the encoding, as specified by the Content-Type header.\n \"\"\"\n content_type = self.headers.get(\"Content-Type\")\n if content_type is None:\n return None\n\n parsed = cgi.parse_header(content_type)\n media_type, params = parsed[0], parsed[-1]\n if \"charset\" in params:\n return params[\"charset\"].strip(\"'\\\"\")\n\n # RFC 2616 specifies that 'iso-8859-1' should be used as the default\n # for 'text/*' media types, if no charset is provided.\n # See: https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n if media_type.startswith(\"text/\"):\n return \"iso-8859-1\"\n\n return None\n\n @property\n def apparent_encoding(self) -> typing.Optional[str]:\n \"\"\"\n Return the encoding, as it appears to autodetection.\n \"\"\"\n return chardet.detect(self.content)[\"encoding\"]\n\n @property\n def decoder(self) -> Decoder:\n \"\"\"\n Returns a decoder instance which can be used to decode the raw byte\n content, depending on the Content-Encoding used in the response.\n \"\"\"\n if not hasattr(self, \"_decoder\"):\n decoders = [] # type: typing.List[Decoder]\n values = self.headers.getlist(\"content-encoding\", split_commas=True)\n for value in values:\n value = value.strip().lower()\n decoder_cls = SUPPORTED_DECODERS[value]\n 
decoders.append(decoder_cls())\n\n if len(decoders) == 1:\n self._decoder = decoders[0]\n elif len(decoders) > 1:\n self._decoder = MultiDecoder(decoders)\n else:\n self._decoder = IdentityDecoder()\n\n return self._decoder\n\n @property\n def is_redirect(self) -> bool:\n return StatusCode.is_redirect(self.status_code) and \"location\" in self.headers\n\n def raise_for_status(self) -> None:\n \"\"\"\n Raise the `HttpError` if one occurred.\n \"\"\"\n message = (\n \"{0.status_code} {error_type}: {0.reason_phrase} for url: {0.url}\\n\"\n \"For more information check: https://httpstatuses.com/{0.status_code}\"\n )\n\n if StatusCode.is_client_error(self.status_code):\n message = message.format(self, error_type=\"Client Error\")\n elif StatusCode.is_server_error(self.status_code):\n message = message.format(self, error_type=\"Server Error\")\n else:\n message = \"\"\n\n if message:\n raise HttpError(message)\n\n def json(self, **kwargs: typing.Any) -> typing.Union[dict, list]:\n if self.charset_encoding is None and self.content and len(self.content) > 3:\n encoding = guess_json_utf(self.content)\n if encoding is not None:\n try:\n return jsonlib.loads(self.content.decode(encoding), **kwargs)\n except UnicodeDecodeError:\n pass\n return jsonlib.loads(self.text, **kwargs)\n\n @property\n def cookies(self) -> \"Cookies\":\n if not hasattr(self, \"_cookies\"):\n assert self.request is not None\n self._cookies = Cookies()\n self._cookies.extract_cookies(self)\n return self._cookies\n\n def __repr__(self) -> str:\n return f\"<Response [{self.status_code} {self.reason_phrase}]>\"\n\n\nclass AsyncResponse(BaseResponse):\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n content: AsyncResponseContent = None,\n on_close: typing.Callable = None,\n request: AsyncRequest = None,\n history: typing.List[\"BaseResponse\"] = None,\n ):\n super().__init__(\n status_code=status_code,\n protocol=protocol,\n headers=headers,\n request=request,\n on_close=on_close,\n )\n\n self.history = [] if history is None else list(history)\n\n if content is None or isinstance(content, bytes):\n self.is_closed = True\n self.is_stream_consumed = True\n self._raw_content = content or b\"\"\n else:\n self.is_closed = False\n self.is_stream_consumed = False\n self._raw_stream = content\n\n async def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"_content\"):\n self._content = b\"\".join([part async for part in self.stream()])\n return self._content\n\n async def stream(self) -> typing.AsyncIterator[bytes]:\n \"\"\"\n A byte-iterator over the decoded response content.\n This allows us to handle gzip, deflate, and brotli encoded responses.\n \"\"\"\n if hasattr(self, \"_content\"):\n yield self._content\n else:\n async for chunk in self.raw():\n yield self.decoder.decode(chunk)\n yield self.decoder.flush()\n\n async def raw(self) -> typing.AsyncIterator[bytes]:\n \"\"\"\n A byte-iterator over the raw response content.\n \"\"\"\n if hasattr(self, \"_raw_content\"):\n yield self._raw_content\n else:\n if self.is_stream_consumed:\n raise StreamConsumed()\n if self.is_closed:\n raise ResponseClosed()\n\n self.is_stream_consumed = True\n async for part in self._raw_stream:\n yield part\n await self.close()\n\n async def close(self) -> None:\n \"\"\"\n Close the response and release the connection.\n Automatically called if the response body is read to completion.\n \"\"\"\n if not self.is_closed:\n self.is_closed = True\n if 
self.on_close is not None:\n await self.on_close()\n\n\nclass Response(BaseResponse):\n def __init__(\n self,\n status_code: int,\n *,\n protocol: str = None,\n headers: HeaderTypes = None,\n content: ResponseContent = None,\n on_close: typing.Callable = None,\n request: Request = None,\n history: typing.List[\"BaseResponse\"] = None,\n ):\n super().__init__(\n status_code=status_code,\n protocol=protocol,\n headers=headers,\n request=request,\n on_close=on_close,\n )\n\n self.history = [] if history is None else list(history)\n\n if content is None or isinstance(content, bytes):\n self.is_closed = True\n self.is_stream_consumed = True\n self._raw_content = content or b\"\"\n else:\n self.is_closed = False\n self.is_stream_consumed = False\n self._raw_stream = content\n\n def read(self) -> bytes:\n \"\"\"\n Read and return the response content.\n \"\"\"\n if not hasattr(self, \"_content\"):\n self._content = b\"\".join([part for part in self.stream()])\n return self._content\n\n def stream(self) -> typing.Iterator[bytes]:\n \"\"\"\n A byte-iterator over the decoded response content.\n This allows us to handle gzip, deflate, and brotli encoded responses.\n \"\"\"\n if hasattr(self, \"_content\"):\n yield self._content\n else:\n for chunk in self.raw():\n yield self.decoder.decode(chunk)\n yield self.decoder.flush()\n\n def raw(self) -> typing.Iterator[bytes]:\n \"\"\"\n A byte-iterator over the raw response content.\n \"\"\"\n if hasattr(self, \"_raw_content\"):\n yield self._raw_content\n else:\n if self.is_stream_consumed:\n raise StreamConsumed()\n if self.is_closed:\n raise ResponseClosed()\n\n self.is_stream_consumed = True\n for part in self._raw_stream:\n yield part\n self.close()\n\n def close(self) -> None:\n \"\"\"\n Close the response and release the connection.\n Automatically called if the response body is read to completion.\n \"\"\"\n if not self.is_closed:\n self.is_closed = True\n if self.on_close is not None:\n self.on_close()\n\n\nclass Cookies(MutableMapping):\n \"\"\"\n HTTP Cookies, as a mutable mapping.\n \"\"\"\n\n def __init__(self, cookies: CookieTypes = None) -> None:\n if cookies is None or isinstance(cookies, dict):\n self.jar = CookieJar()\n if isinstance(cookies, dict):\n for key, value in cookies.items():\n self.set(key, value)\n elif isinstance(cookies, Cookies):\n self.jar = CookieJar()\n for cookie in cookies.jar:\n self.jar.set_cookie(cookie)\n else:\n self.jar = cookies\n\n def extract_cookies(self, response: BaseResponse) -> None:\n \"\"\"\n Loads any cookies based on the response `Set-Cookie` headers.\n \"\"\"\n assert response.request is not None\n urlib_response = self._CookieCompatResponse(response)\n urllib_request = self._CookieCompatRequest(response.request)\n\n self.jar.extract_cookies(urlib_response, urllib_request) # type: ignore\n\n def set_cookie_header(self, request: BaseRequest) -> None:\n \"\"\"\n Sets an appropriate 'Cookie:' HTTP header on the `Request`.\n \"\"\"\n urllib_request = self._CookieCompatRequest(request)\n self.jar.add_cookie_header(urllib_request)\n\n def set(self, name: str, value: str, domain: str = \"\", path: str = \"/\") -> None:\n \"\"\"\n Set a cookie value by name. 
May optionally include domain and path.\n \"\"\"\n kwargs = dict(\n version=0,\n name=name,\n value=value,\n port=None,\n port_specified=False,\n domain=domain,\n domain_specified=bool(domain),\n domain_initial_dot=domain.startswith(\".\"),\n path=path,\n path_specified=bool(path),\n secure=False,\n expires=None,\n discard=True,\n comment=None,\n comment_url=None,\n rest={\"HttpOnly\": None},\n rfc2109=False,\n )\n cookie = Cookie(**kwargs) # type: ignore\n self.jar.set_cookie(cookie)\n\n def get( # type: ignore\n self, name: str, default: str = None, domain: str = None, path: str = None\n ) -> typing.Optional[str]:\n \"\"\"\n Get a cookie by name. May optionally include domain and path\n in order to specify exactly which cookie to retrieve.\n \"\"\"\n value = None\n for cookie in self.jar:\n if cookie.name == name:\n if domain is None or cookie.domain == domain: # type: ignore\n if path is None or cookie.path == path:\n if value is not None:\n message = f\"Multiple cookies exist with name={name}\"\n raise CookieConflict(message)\n value = cookie.value\n\n if value is None:\n return default\n return value\n\n def delete(self, name: str, domain: str = None, path: str = None) -> None:\n \"\"\"\n Delete a cookie by name. May optionally include domain and path\n in order to specify exactly which cookie to delete.\n \"\"\"\n if domain is not None and path is not None:\n return self.jar.clear(domain, path, name)\n\n remove = []\n for cookie in self.jar:\n if cookie.name == name:\n if domain is None or cookie.domain == domain: # type: ignore\n if path is None or cookie.path == path:\n remove.append(cookie)\n\n for cookie in remove:\n self.jar.clear(cookie.domain, cookie.path, cookie.name) # type: ignore\n\n def clear(self, domain: str = None, path: str = None) -> None:\n \"\"\"\n Delete all cookies. 
Optionally include a domain and path in\n order to only delete a subset of all the cookies.\n \"\"\"\n args = []\n if domain is not None:\n args.append(domain)\n if path is not None:\n assert domain is not None\n args.append(path)\n self.jar.clear(*args)\n\n def update(self, cookies: CookieTypes = None) -> None: # type: ignore\n cookies = Cookies(cookies)\n for cookie in cookies.jar:\n self.jar.set_cookie(cookie)\n\n def __setitem__(self, name: str, value: str) -> None:\n return self.set(name, value)\n\n def __getitem__(self, name: str) -> str:\n value = self.get(name)\n if value is None:\n raise KeyError(name)\n return value\n\n def __delitem__(self, name: str) -> None:\n return self.delete(name)\n\n def __len__(self) -> int:\n return len(self.jar)\n\n def __iter__(self) -> typing.Iterator[str]:\n return (cookie.name for cookie in self.jar)\n\n def __bool__(self) -> bool:\n for cookie in self.jar:\n return True\n return False\n\n class _CookieCompatRequest(urllib.request.Request):\n \"\"\"\n Wraps a `Request` instance up in a compatability interface suitable\n for use with `CookieJar` operations.\n \"\"\"\n\n def __init__(self, request: BaseRequest) -> None:\n super().__init__(\n url=str(request.url),\n headers=dict(request.headers),\n method=request.method,\n )\n self.request = request\n\n def add_unredirected_header(self, key: str, value: str) -> None:\n super().add_unredirected_header(key, value)\n self.request.headers[key] = value\n\n class _CookieCompatResponse:\n \"\"\"\n Wraps a `Request` instance up in a compatability interface suitable\n for use with `CookieJar` operations.\n \"\"\"\n\n def __init__(self, response: BaseResponse):\n self.response = response\n\n def info(self) -> email.message.Message:\n info = email.message.Message()\n for key, value in self.response.headers.items():\n info[key] = value\n return info\n",
"path": "httpx/models.py"
}
] | diff --git a/httpx/models.py b/httpx/models.py
index 710c0303df..9cbdc250e2 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -204,7 +204,7 @@ def __hash__(self) -> int:
return hash(str(self))
def __eq__(self, other: typing.Any) -> bool:
- return isinstance(other, URL) and str(self) == str(other)
+ return isinstance(other, (URL, str)) and str(self) == str(other)
def __str__(self) -> str:
return self.components.unsplit()
diff --git a/tests/models/test_url.py b/tests/models/test_url.py
index 5f5208ca92..70089e0f1f 100644
--- a/tests/models/test_url.py
+++ b/tests/models/test_url.py
@@ -25,6 +25,12 @@ def test_url():
assert new.scheme == "http"
+def test_url_eq_str():
+ url = URL("https://example.org:123/path/to/somewhere?abc=123#anchor")
+ assert url == "https://example.org:123/path/to/somewhere?abc=123#anchor"
+ assert str(url) == url
+
+
def test_url__params():
url = URL("https://example.org:123/path/to/somewhere", params={"a": "123"})
assert str(url) == "https://example.org:123/path/to/somewhere?a=123"
|
LMFDB__lmfdb-2975 | Could not download the elliptic curves of norm conductor 1
From the feedback page:
I could not download the elliptic curves of norm conductor 1 into a magma file.
Please respond to [email protected]
| [
{
"content": "# -*- coding: utf-8 -*-\n# This Blueprint is about Elliptic Curves over Number Fields\n# Authors: Harald Schilly and John Cremona\n\nimport ast, re, StringIO, time\nfrom operator import mul\nfrom urllib import quote, unquote\n\nfrom flask import render_template, request, url_for, redirect, flash, send_file, make_response\nfrom markupsafe import Markup\n\nfrom lmfdb import db\nfrom lmfdb.backend.encoding import Json\nfrom lmfdb.app import app\nfrom lmfdb.utils import (\n to_dict,\n parse_ints, parse_noop, nf_string_to_label, parse_nf_string, parse_nf_elt, parse_bracketed_posints,\n search_wrap)\nfrom lmfdb.number_fields.number_field import field_pretty\nfrom lmfdb.number_fields.web_number_field import nf_display_knowl, WebNumberField\nfrom lmfdb.ecnf import ecnf_page\nfrom lmfdb.ecnf.ecnf_stats import ECNF_stats\nfrom lmfdb.ecnf.WebEllipticCurve import ECNF, web_ainvs, convert_IQF_label\nfrom lmfdb.ecnf.isog_class import ECNF_isoclass\n\ndef split_full_label(lab):\n r\"\"\" Split a full curve label into 4 components\n (field_label,conductor_label,isoclass_label,curve_number)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 3:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. It must be of the form (number field label) - (conductor label) - (isogeny class label) - (curve identifier) separated by dashes, such as 2.2.5.1-31.1-a1\" % lab), \"error\")\n raise ValueError\n field_label = data[0]\n conductor_label = data[1]\n try:\n # field 3.1.23.1 uses upper case letters\n isoclass_label = re.search(\"(CM)?[a-zA-Z]+\", data[2]).group()\n curve_number = re.search(\"\\d+\", data[2]).group() # (a string)\n except AttributeError:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. The last part must contain both an isogeny class label (a sequence of letters), followed by a curve id (an integer), such as a1\" % lab), \"error\")\n raise ValueError\n return (field_label, conductor_label, isoclass_label, curve_number)\n\n\ndef split_short_label(lab):\n r\"\"\" Split a short curve label into 3 components\n (conductor_label,isoclass_label,curve_number)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 2:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. It must be of the form (conductor label) - (isogeny class label) - (curve identifier) separated by dashes, such as 31.1-a1\" % lab), \"error\")\n raise ValueError\n conductor_label = data[0]\n try:\n # field 3.1.23.1 uses upper case letters\n isoclass_label = re.search(\"[a-zA-Z]+\", data[1]).group()\n curve_number = re.search(\"\\d+\", data[1]).group() # (a string)\n except AttributeError:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. The last part must contain both an isogeny class label (a sequence of letters), followed by a curve id (an integer), such as a1\" % lab), \"error\")\n raise ValueError\n return (conductor_label, isoclass_label, curve_number)\n\n\ndef split_class_label(lab):\n r\"\"\" Split a class label into 3 components\n (field_label, conductor_label,isoclass_label)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 3:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid isogeny class label. 
It must be of the form (number field label) - (conductor label) - (isogeny class label) (separated by dashes), such as 2.2.5.1-31.1-a\" % lab), \"error\")\n raise ValueError\n field_label = data[0]\n conductor_label = data[1]\n isoclass_label = data[2]\n return (field_label, conductor_label, isoclass_label)\n\n\ndef split_short_class_label(lab):\n r\"\"\" Split a short class label into 2 components\n (conductor_label,isoclass_label)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 2:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid isogeny class label. It must be of the form (conductor label) - (isogeny class label) (separated by dashes), such as 31.1-a\" % lab), \"error\")\n raise ValueError\n conductor_label = data[0]\n isoclass_label = data[1]\n return (conductor_label, isoclass_label)\n\ndef conductor_label_norm(lab):\n r\"\"\" extract norm from conductor label (as a string)\"\"\"\n s = lab.replace(' ','')\n if re.match(r'\\d+.\\d+',s):\n return s.split('.')[0]\n else:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid conductor label. It must be of the form N.m or [N,c,d]\" % lab), \"error\")\n raise ValueError\n\ndef get_nf_info(lab):\n r\"\"\" extract number field label from string and pretty\"\"\"\n try:\n label = nf_string_to_label(lab)\n pretty = field_pretty (label)\n except ValueError as err:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid number field. %s\" % (lab,err)), \"error\")\n raise ValueError\n return label, pretty\n\n\necnf_credit = \"John Cremona, Alyson Deines, Steve Donelly, Paul Gunnells, Warren Moore, Haluk Sengun, Andrew V Sutherland, John Voight, Dan Yasaki\"\n\n\ndef get_bread(*breads):\n bc = [(\"Elliptic Curves\", url_for(\".index\"))]\n map(bc.append, breads)\n return bc\n\ndef learnmore_list():\n return [('Completeness of the data', url_for(\".completeness_page\")),\n ('Source of the data', url_for(\".how_computed_page\")),\n ('Reliability of the data', url_for(\".reliability_page\")),\n ('Elliptic Curve labels', url_for(\".labels_page\"))]\n\n# Return the learnmore list with the matchstring entry removed\ndef learnmore_list_remove(matchstring):\n return filter(lambda t:t[0].find(matchstring) <0, learnmore_list())\n\n@ecnf_page.route(\"/Completeness\")\ndef completeness_page():\n t = 'Completeness of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Completeness', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.extent',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Completeness'))\n\n\n@ecnf_page.route(\"/Source\")\ndef how_computed_page():\n t = 'Source of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Source', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.source',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Source'))\n\n@ecnf_page.route(\"/Reliability\")\ndef reliability_page():\n t = 'Reliability of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Source', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.reliability',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Reliability'))\n\n@ecnf_page.route(\"/Labels\")\ndef labels_page():\n t = 'Labels for Elliptic Curves over Number Fields'\n bread = [('Elliptic Curves', 
url_for(\"ecnf.index\")),\n ('Labels', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='ec.curve_label',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('labels'))\n\n\n@ecnf_page.route(\"/\")\ndef index():\n # if 'jump' in request.args:\n # return show_ecnf1(request.args['label'])\n if len(request.args) > 0:\n return elliptic_curve_search(request.args)\n bread = get_bread()\n\n # the dict data will hold additional information to be displayed on\n # the main browse and search page\n\n data = {}\n\n # data['fields'] holds data for a sample of number fields of different\n # signatures for a general browse:\n\n fields_by_deg = ECNF_stats().fields_by_deg\n fields_by_sig = ECNF_stats().fields_by_sig\n data['fields'] = []\n # Rationals\n data['fields'].append(['the rational field', (('1.1.1.1', [url_for('ec.rational_elliptic_curves'), '$\\Q$']),)])\n\n # Real quadratics (sample)\n rqfs = ['2.2.{}.1'.format(d) for d in [5, 89, 229, 497]]\n niqfs = len(fields_by_sig[0,1])\n nrqfs = len(fields_by_sig[2,0])\n data['fields'].append(['{} real quadratic fields, including'.format(nrqfs),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in rqfs)])\n\n # Imaginary quadratics (sample)\n iqfs = ['2.0.{}.1'.format(d) for d in [4, 8, 3, 7, 11]]\n data['fields'].append(['{} imaginary quadratic fields, including'.format(niqfs),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in iqfs)])\n\n # Cubics (sample)\n cubics = ['3.1.23.1'] + ['3.3.{}.1'.format(d) for d in [49,148,1957]]\n ncubics = len(fields_by_deg[3])\n data['fields'].append(['{} cubic fields, including'.format(ncubics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in cubics)])\n\n # Quartics (sample)\n quartics = ['4.4.{}.1'.format(d) for d in [725,2777,9909,19821]]\n nquartics = len(fields_by_deg[4])\n data['fields'].append(['{} totally real quartic fields, including'.format(nquartics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in quartics)])\n\n # Quintics (sample)\n quintics = ['5.5.{}.1'.format(d) for d in [14641, 24217, 36497, 38569, 65657]]\n nquintics = len(fields_by_deg[5])\n data['fields'].append(['{} totally real quintic fields, including'.format(nquintics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in quintics)])\n\n # Sextics (sample)\n sextics = ['6.6.{}.1'.format(d) for d in [300125, 371293, 434581, 453789, 485125]]\n nsextics = len(fields_by_deg[6])\n data['fields'].append(['{} totally real sextic fields, including'.format(nsextics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in sextics)])\n\n data['degrees'] = sorted([int(d) for d in fields_by_deg.keys() if d!='_id'])\n\n# data['highlights'] holds data (URL and descriptive text) for a\n# sample of elliptic curves with interesting features:\n\n data['highlights'] = []\n data['highlights'].append(\n ['A curve with $C_3\\\\times C_3$ torsion',\n url_for('.show_ecnf', nf='2.0.3.1', class_label='a', conductor_label='2268.36.18', number=int(1))]\n )\n data['highlights'].append(\n ['A curve with $C_4\\\\times C_4$ torsion',\n url_for('.show_ecnf', nf='2.0.4.1', class_label='b', conductor_label='5525.870.5', number=int(9))]\n )\n data['highlights'].append(\n ['A curve with CM by $\\\\sqrt{-267}$',\n url_for('.show_ecnf', nf='2.2.89.1', class_label='a', conductor_label='81.1', number=int(1))]\n )\n data['highlights'].append(\n ['An isogeny class with isogenies of degree $3$ and $89$ (and $267$)',\n 
url_for('.show_ecnf_isoclass', nf='2.2.89.1', class_label='a', conductor_label='81.1')]\n )\n data['highlights'].append(\n ['A curve with everywhere good reduction, but no global minimal model',\n url_for('.show_ecnf', nf='2.2.229.1', class_label='a', conductor_label='1.1', number=int(1))]\n )\n\n return render_template(\"ecnf-index.html\",\n title=\"Elliptic Curves over Number Fields\",\n data=data,\n bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/random\")\ndef random_curve():\n E = db.ec_nfcurves.random(projection=['field_label', 'conductor_label', 'iso_label', 'number'])\n return redirect(url_for(\".show_ecnf\", nf=E['field_label'], conductor_label=E['conductor_label'], class_label=E['iso_label'], number=E['number']), 307)\n\n@ecnf_page.route(\"/<nf>/\")\ndef show_ecnf1(nf):\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n except ValueError:\n return search_input_error()\n if nf_label == '1.1.1.1':\n return redirect(url_for(\"ec.rational_elliptic_curves\", **request.args), 301)\n info = to_dict(request.args)\n info['title'] = 'Elliptic Curves over %s' % nf_pretty\n info['bread'] = [('Elliptic Curves', url_for(\".index\")), (nf_pretty, url_for(\".show_ecnf1\", nf=nf))]\n if len(request.args) > 0:\n # if requested field differs from nf, redirect to general search\n if 'field' in request.args and request.args['field'] != nf_label:\n return redirect (url_for(\".index\", **request.args), 307)\n info['title'] += ' Search Results'\n info['bread'].append(('Search Results',''))\n info['field'] = nf_label\n return elliptic_curve_search(info)\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/\")\ndef show_ecnf_conductor(nf, conductor_label):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n conductor_norm = conductor_label_norm(conductor_label)\n except ValueError:\n return search_input_error()\n info = to_dict(request.args)\n info['title'] = 'Elliptic Curves over %s of Conductor %s' % (nf_pretty, conductor_label)\n info['bread'] = [('Elliptic Curves', url_for(\".index\")), (nf_pretty, url_for(\".show_ecnf1\", nf=nf)), (conductor_label, url_for(\".show_ecnf_conductor\",nf=nf,conductor_label=conductor_label))]\n if len(request.args) > 0:\n # if requested field or conductor norm differs from nf or conductor_lable, redirect to general search\n if ('field' in request.args and request.args['field'] != nf_label) or \\\n ('conductor_norm' in request.args and request.args['conductor_norm'] != conductor_norm):\n return redirect (url_for(\".index\", **request.args), 307)\n info['title'] += ' Search Results'\n info['bread'].append(('Search Results',''))\n info['field'] = nf_label\n info['conductor_label'] = conductor_label\n info['conductor_norm'] = conductor_norm\n return elliptic_curve_search(info)\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/<class_label>/\")\ndef show_ecnf_isoclass(nf, conductor_label, class_label):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n except ValueError:\n return search_input_error()\n label = \"-\".join([nf_label, conductor_label, class_label])\n full_class_label = \"-\".join([conductor_label, class_label])\n cl = ECNF_isoclass.by_label(label)\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n if not isinstance(cl, ECNF_isoclass):\n info = {'query':{}, 'err':'No elliptic curve isogeny class in the database has label %s.' 
% label}\n return search_input_error(info, bread)\n title = \"Elliptic Curve Isogeny Class %s over Number Field %s\" % (full_class_label, cl.field_name)\n bread.append((nf_pretty, url_for(\".show_ecnf1\", nf=nf)))\n bread.append((conductor_label, url_for(\".show_ecnf_conductor\", nf=nf_label, conductor_label=conductor_label)))\n bread.append((class_label, url_for(\".show_ecnf_isoclass\", nf=nf_label, conductor_label=quote(conductor_label), class_label=class_label)))\n return render_template(\"ecnf-isoclass.html\",\n credit=ecnf_credit,\n title=title,\n bread=bread,\n cl=cl,\n properties2=cl.properties,\n friends=cl.friends,\n learnmore=learnmore_list())\n\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/<class_label>/<number>\")\ndef show_ecnf(nf, conductor_label, class_label, number):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label = nf_string_to_label(nf)\n except ValueError:\n return search_input_error()\n label = \"\".join([\"-\".join([nf_label, conductor_label, class_label]), number])\n ec = ECNF.by_label(label)\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n if not ec:\n info = {'query':{}}\n info['err'] = 'No elliptic curve in the database has label %s.' % label\n return search_input_error(info, bread)\n\n title = \"Elliptic Curve %s over Number Field %s\" % (ec.short_label, ec.field.field_pretty())\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n bread.append((ec.field.field_pretty(), ec.urls['field']))\n bread.append((ec.conductor_label, ec.urls['conductor']))\n bread.append((ec.iso_label, ec.urls['class']))\n bread.append((ec.number, ec.urls['curve']))\n code = ec.code()\n code['show'] = {'magma':'','pari':'','sage':''} # use default show names\n info = {}\n return render_template(\"ecnf-curve.html\",\n credit=ecnf_credit,\n title=title,\n bread=bread,\n ec=ec,\n code = code,\n # properties = ec.properties,\n properties2=ec.properties,\n friends=ec.friends,\n downloads=ec.downloads,\n info=info,\n KNOWL_ID=\"ec.%s\"%label,\n learnmore=learnmore_list())\n\ndef download_search(info):\n dltype = info['submit']\n delim = 'bracket'\n com = r'\\\\' # single line comment start\n com1 = '' # multiline comment start\n com2 = '' # multiline comment end\n filename = 'elliptic_curves.gp'\n mydate = time.strftime(\"%d %B %Y\")\n if dltype == 'sage':\n com = '#'\n filename = 'elliptic_curves.sage'\n if dltype == 'magma':\n com = ''\n com1 = '/*'\n com2 = '*/'\n delim = 'magma'\n filename = 'elliptic_curves.m'\n s = com1 + \"\\n\"\n s += com + ' Elliptic curves downloaded from the LMFDB downloaded on %s.\\n'%(mydate)\n s += com + ' Below is a list called data. 
Each entry has the form:\\n'\n s += com + ' [[field_poly],[Weierstrass Coefficients, constant first in increasing degree]]\\n'\n s += '\\n' + com2\n s += '\\n'\n\n if dltype == 'magma':\n s += 'P<x> := PolynomialRing(Rationals()); \\n'\n s += 'data := ['\n elif dltype == 'sage':\n s += 'R.<x> = QQ[]; \\n'\n s += 'data = [ '\n else:\n s += 'data = [ '\n s += '\\\\\\n'\n nf_dict = {}\n for f in db.ec_nfcurves.search(ast.literal_eval(info[\"query\"]), ['field_label', 'ainvs']):\n nf = str(f['field_label'])\n # look up number field and see if we already have the min poly\n if nf in nf_dict:\n poly = nf_dict[nf]\n else:\n poly = str(WebNumberField(f['field_label']).poly())\n nf_dict[nf] = poly\n entry = str(f['ainvs'])\n entry = entry.replace('u','')\n entry = entry.replace('\\'','')\n entry = entry.replace(';','],[')\n s += '[[' + poly + '], [[' + entry + ']]],\\\\\\n'\n s = s[:-3]\n s += ']\\n'\n\n if delim == 'brace':\n s = s.replace('[', '{')\n s = s.replace(']', '}')\n if delim == 'magma':\n s = s.replace('[', '[*')\n s = s.replace(']', '*]')\n s += ';'\n strIO = StringIO.StringIO()\n strIO.write(s)\n strIO.seek(0)\n return send_file(strIO,\n attachment_filename=filename,\n as_attachment=True,\n add_etags=False)\n\ndef elliptic_curve_jump(info):\n label = info.get('label', '').replace(\" \", \"\")\n # This label should be a full isogeny class label or a full\n # curve label (including the field_label component)\n try:\n nf, cond_label, iso_label, number = split_full_label(label.strip())\n except ValueError:\n info['err'] = ''\n bread = [('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')]\n return search_input_error(info, bread)\n\n return redirect(url_for(\".show_ecnf\", nf=nf, conductor_label=cond_label, class_label=iso_label, number=number), 301)\n\n@search_wrap(template=\"ecnf-search-results.html\",\n table=db.ec_nfcurves,\n title='Elliptic Curve Search Results',\n err_title='Elliptic Curve Search Input Error',\n shortcuts={'jump':elliptic_curve_jump,\n 'download':download_search},\n cleaners={'numb':lambda e: str(e['number']),\n 'field_knowl':lambda e: nf_display_knowl(e['field_label'], field_pretty(e['field_label']))},\n bread=lambda:[('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')],\n credit=lambda:ecnf_credit)\ndef elliptic_curve_search(info, query):\n parse_nf_string(info,query,'field',name=\"base number field\",qfield='field_label')\n if query.get('field_label') == '1.1.1.1':\n return redirect(url_for(\"ec.rational_elliptic_curves\", **request.args), 301)\n\n parse_ints(info,query,'conductor_norm')\n parse_noop(info,query,'conductor_label')\n parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order')\n parse_bracketed_posints(info,query,'torsion_structure',maxlength=2)\n if 'torsion_structure' in query and not 'torsion_order' in query:\n query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)\n parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')\n\n if 'jinv' in info:\n if info.get('field','').strip() == '2.2.5.1':\n info['jinv'] = info['jinv'].replace('phi','a')\n if info.get('field','').strip() == '2.0.4.1':\n info['jinv'] = info['jinv'].replace('i','a')\n parse_nf_elt(info,query,'jinv',name='j-invariant')\n if query.get('jinv'):\n query['jinv'] =','.join(query['jinv'])\n\n if 'include_isogenous' in info and info['include_isogenous'] == 'off':\n info['number'] = 1\n query['number'] = 1\n\n if 'include_base_change' in info:\n if info['include_base_change'] == 'off':\n query['base_change'] = 
[]\n if info['include_base_change'] == 'only':\n query['base_change'] = {'$ne':[]}\n else:\n info['include_base_change'] = \"on\"\n\n if 'include_Q_curves' in info:\n if info['include_Q_curves'] == 'exclude':\n query['q_curve'] = False\n elif info['include_Q_curves'] == 'only':\n query['q_curve'] = True\n\n if 'include_cm' in info:\n if info['include_cm'] == 'exclude':\n query['cm'] = 0\n elif info['include_cm'] == 'only':\n query['cm'] = {'$ne' : 0}\n\n info['field_pretty'] = field_pretty\n info['web_ainvs'] = web_ainvs\n\ndef search_input_error(info=None, bread=None):\n if info is None: info = {'err':'','query':{}}\n if bread is None: bread = [('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')]\n return render_template(\"ecnf-search-results.html\", info=info, title='Elliptic Curve Search Input Error', bread=bread)\n\n\n@ecnf_page.route(\"/browse/\")\ndef browse():\n data = ECNF_stats().sigs_by_deg\n # We could use the dict directly but then could not control the order\n # of the keys (degrees), so we use a list\n info = [[d,['%s,%s'%sig for sig in data[d]]] for d in sorted(data.keys())]\n credit = 'John Cremona'\n t = 'Elliptic Curves over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Browse', ' ')]\n return render_template(\"ecnf-stats.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/browse/<int:d>/\")\ndef statistics_by_degree(d):\n if d==1:\n return redirect(url_for(\"ec.statistics\"))\n info = {}\n\n sigs_by_deg = ECNF_stats().sigs_by_deg\n if d not in sigs_by_deg:\n info['error'] = \"The database does not contain any elliptic curves defined over fields of degree %s\" % d\n else:\n info['degree'] = d\n\n fields_by_sig = ECNF_stats().fields_by_sig\n counts_by_sig = ECNF_stats().sig_normstats\n counts_by_field = ECNF_stats().field_normstats\n\n def field_counts(f):\n return [f,counts_by_field[f]]\n\n def sig_counts(sig):\n return ['%s,%s'%sig, counts_by_sig[sig], [field_counts(f) for f in fields_by_sig[sig]]]\n\n info['summary'] = ECNF_stats().degree_summary(d)\n info['sig_stats'] = [sig_counts(sig) for sig in sigs_by_deg[d]]\n credit = 'John Cremona'\n if d==2:\n t = 'Elliptic Curves over Quadratic Number Fields'\n elif d==3:\n t = 'Elliptic Curves over Cubic Number Fields'\n elif d==4:\n t = 'Elliptic Curves over Quartic Number Fields'\n elif d==5:\n t = 'Elliptic Curves over Quintic Number Fields'\n elif d==6:\n t = 'Elliptic Curves over Sextic Number Fields'\n else:\n t = 'Elliptic Curves over Number Fields of Degree {}'.format(d)\n\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Degree %s' % d,' ')]\n return render_template(\"ecnf-by-degree.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/browse/<int:d>/<int:r>/\")\ndef statistics_by_signature(d,r):\n if d==1:\n return redirect(url_for(\"ec.statistics\"))\n\n info = {}\n\n sigs_by_deg = ECNF_stats().sigs_by_deg\n if d not in sigs_by_deg:\n info['error'] = \"The database does not contain any elliptic curves defined over fields of degree %s\" % d\n else:\n info['degree'] = d\n\n if not r in range(d%2,d+1,2):\n info['error'] = \"Invalid signature %s\" % info['sig']\n s = (d-r)//2\n sig = (r,s)\n info['sig'] = '%s,%s' % sig\n info['summary'] = ECNF_stats().signature_summary(sig)\n\n fields_by_sig = ECNF_stats().fields_by_sig\n counts_by_field = ECNF_stats().field_normstats\n\n def field_counts(f):\n return [f,counts_by_field[f]]\n\n info['sig_stats'] = 
[field_counts(f) for f in fields_by_sig[sig]]\n credit = 'John Cremona'\n if info['sig'] == '2,0':\n t = 'Elliptic Curves over Real Quadratic Number Fields'\n elif info['sig'] == '0,1':\n t = 'Elliptic Curves over Imaginary Quadratic Number Fields'\n elif info['sig'] == '3,0':\n t = 'Elliptic Curves over Totally Real Cubic Number fields'\n elif info['sig'] == '1,1':\n t = 'Elliptic Curves over Mixed Cubic Number Fields'\n elif info['sig'] == '4,0':\n t = 'Elliptic Curves over Totally Real Quartic Number Fields'\n elif info['sig'] == '5,0':\n t = 'Elliptic Curves over Totally Real Quintic Number Fields'\n elif info['sig'] == '6,0':\n t = 'Elliptic Curves over Totally Real Sextic Number Fields'\n else:\n t = 'Elliptic Curves over Number Fields of Degree %s, Signature (%s)' % (d,info['sig'])\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Degree %s' % d,url_for(\"ecnf.statistics_by_degree\", d=d)),\n ('Signature (%s)' % info['sig'],' ')]\n return render_template(\"ecnf-by-signature.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\ndef tor_struct_search_nf(prefill=\"any\"):\n def fix(t):\n return t + ' selected = \"yes\"' if prefill==t else t\n def cyc(n):\n return [fix(\"[\"+str(n)+\"]\"), \"C{}\".format(n)]\n def cyc2(m,n):\n return [fix(\"[{},{}]\".format(m,n)), \"C{}×C{}\".format(m,n)]\n gps = [[fix(\"\"), \"any\"], [fix(\"[]\"), \"trivial\"]]\n\n tors = ECNF_stats().torsion_counts\n\n # The following was the set as of 24/4/2017:\n # assert tors == [[2], [2, 2], [2, 4], [2, 6], [2, 8], [2, 10], [2, 12], [2, 14], [2, 16], [2, 18], [3], [3, 3], [3, 6], [4], [4, 4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [25], [27], [37]]\n\n for t in tors:\n if len(t)==1:\n gps.append(cyc(t[0]))\n elif len(t)==2:\n gps.append(cyc2(*t))\n\n return \"\\n\".join([\"<select name='torsion_structure'>\"] + [\"<option value={}>{}</option>\".format(a,b) for a,b in gps] + [\"</select>\"])\n\n# the following allows the preceding function to be used in any template via {{...}}\napp.jinja_env.globals.update(tor_struct_search_nf=tor_struct_search_nf)\n\n@ecnf_page.route(\"/download_all/<nf>/<conductor_label>/<class_label>/<number>\")\ndef download_ECNF_all(nf,conductor_label,class_label,number):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label = nf_string_to_label(nf)\n except ValueError:\n return search_input_error()\n label = \"\".join([\"-\".join([nf_label, conductor_label, class_label]), number])\n data = db.ec_nfcurves.lookup(label)\n if data is None:\n return search_input_error()\n\n response = make_response(Json.dumps(data))\n response.headers['Content-type'] = 'text/plain'\n return response\n\n@ecnf_page.route('/<nf>/<conductor_label>/<class_label>/<number>/download/<download_type>')\ndef ecnf_code_download(**args):\n response = make_response(ecnf_code(**args))\n response.headers['Content-type'] = 'text/plain'\n return response\n\nsorted_code_names = ['field', 'curve', 'is_min', 'cond', 'cond_norm',\n 'disc', 'disc_norm', 'jinv', 'cm', 'rank', 'ntors',\n 'gens', 'reg', 'tors', 'torgens', 'localdata']\n\ncode_names = {'field': 'Define the base number field',\n 'curve': 'Define the curve',\n 'is_min': 'Test whether it is a global minimal model',\n 'cond': 'Compute the conductor',\n 'cond_norm': 'Compute the norm of the conductor',\n 'disc': 'Compute the discriminant',\n 'disc_norm': 'Compute the norm of the discriminant',\n 'jinv': 
'Compute the j-invariant',\n 'cm': 'Test for Complex Multiplication',\n 'rank': 'Compute the Mordell-Weil rank',\n 'ntors': 'Compute the order of the torsion subgroup',\n 'gens': 'Compute the generators (of infinite order)',\n 'reg': 'Compute the regulator',\n 'tors': 'Compute the torsion subgroup',\n 'torgens': 'Compute the generators of the torsion subgroup',\n 'localdata': 'Compute the local reduction data at primes of bad reduction'\n}\n\nFullname = {'magma': 'Magma', 'sage': 'SageMath', 'gp': 'Pari/GP'}\nComment = {'magma': '//', 'sage': '#', 'gp': '\\\\\\\\', 'pari': '\\\\\\\\'}\n\ndef ecnf_code(**args):\n label = \"\".join([\"-\".join([args['nf'], args['conductor_label'], args['class_label']]), args['number']])\n E = ECNF.by_label(label)\n Ecode = E.code()\n lang = args['download_type']\n code = \"{} {} code for working with elliptic curve {}\\n\\n\".format(Comment[lang],Fullname[lang],label)\n code += \"{} (Note that not all these functions may be available, and some may take a long time to execute.)\\n\".format(Comment[lang])\n if lang=='gp':\n lang = 'pari'\n for k in sorted_code_names:\n if lang in Ecode[k]:\n code += \"\\n{} {}: \\n\".format(Comment[lang],code_names[k])\n code += Ecode[k][lang] + ('\\n' if not '\\n' in Ecode[k][lang] else '')\n return code\n\n",
"path": "lmfdb/ecnf/main.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# This Blueprint is about Elliptic Curves over Number Fields\n# Authors: Harald Schilly and John Cremona\n\nimport ast, re, StringIO, time\nfrom operator import mul\nfrom urllib import quote, unquote\n\nfrom flask import render_template, request, url_for, redirect, flash, send_file, make_response\nfrom markupsafe import Markup\n\nfrom lmfdb import db\nfrom lmfdb.backend.encoding import Json\nfrom lmfdb.app import app\nfrom lmfdb.utils import (\n to_dict,\n parse_ints, parse_noop, nf_string_to_label, parse_nf_string, parse_nf_elt, parse_bracketed_posints,\n search_wrap)\nfrom lmfdb.number_fields.number_field import field_pretty\nfrom lmfdb.number_fields.web_number_field import nf_display_knowl, WebNumberField\nfrom lmfdb.ecnf import ecnf_page\nfrom lmfdb.ecnf.ecnf_stats import ECNF_stats\nfrom lmfdb.ecnf.WebEllipticCurve import ECNF, web_ainvs, convert_IQF_label\nfrom lmfdb.ecnf.isog_class import ECNF_isoclass\n\ndef split_full_label(lab):\n r\"\"\" Split a full curve label into 4 components\n (field_label,conductor_label,isoclass_label,curve_number)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 3:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. It must be of the form (number field label) - (conductor label) - (isogeny class label) - (curve identifier) separated by dashes, such as 2.2.5.1-31.1-a1\" % lab), \"error\")\n raise ValueError\n field_label = data[0]\n conductor_label = data[1]\n try:\n # field 3.1.23.1 uses upper case letters\n isoclass_label = re.search(\"(CM)?[a-zA-Z]+\", data[2]).group()\n curve_number = re.search(\"\\d+\", data[2]).group() # (a string)\n except AttributeError:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. The last part must contain both an isogeny class label (a sequence of letters), followed by a curve id (an integer), such as a1\" % lab), \"error\")\n raise ValueError\n return (field_label, conductor_label, isoclass_label, curve_number)\n\n\ndef split_short_label(lab):\n r\"\"\" Split a short curve label into 3 components\n (conductor_label,isoclass_label,curve_number)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 2:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. It must be of the form (conductor label) - (isogeny class label) - (curve identifier) separated by dashes, such as 31.1-a1\" % lab), \"error\")\n raise ValueError\n conductor_label = data[0]\n try:\n # field 3.1.23.1 uses upper case letters\n isoclass_label = re.search(\"[a-zA-Z]+\", data[1]).group()\n curve_number = re.search(\"\\d+\", data[1]).group() # (a string)\n except AttributeError:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid elliptic curve label. The last part must contain both an isogeny class label (a sequence of letters), followed by a curve id (an integer), such as a1\" % lab), \"error\")\n raise ValueError\n return (conductor_label, isoclass_label, curve_number)\n\n\ndef split_class_label(lab):\n r\"\"\" Split a class label into 3 components\n (field_label, conductor_label,isoclass_label)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 3:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid isogeny class label. 
It must be of the form (number field label) - (conductor label) - (isogeny class label) (separated by dashes), such as 2.2.5.1-31.1-a\" % lab), \"error\")\n raise ValueError\n field_label = data[0]\n conductor_label = data[1]\n isoclass_label = data[2]\n return (field_label, conductor_label, isoclass_label)\n\n\ndef split_short_class_label(lab):\n r\"\"\" Split a short class label into 2 components\n (conductor_label,isoclass_label)\n \"\"\"\n data = lab.split(\"-\")\n if len(data) != 2:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid isogeny class label. It must be of the form (conductor label) - (isogeny class label) (separated by dashes), such as 31.1-a\" % lab), \"error\")\n raise ValueError\n conductor_label = data[0]\n isoclass_label = data[1]\n return (conductor_label, isoclass_label)\n\ndef conductor_label_norm(lab):\n r\"\"\" extract norm from conductor label (as a string)\"\"\"\n s = lab.replace(' ','')\n if re.match(r'\\d+.\\d+',s):\n return s.split('.')[0]\n else:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid conductor label. It must be of the form N.m or [N,c,d]\" % lab), \"error\")\n raise ValueError\n\ndef get_nf_info(lab):\n r\"\"\" extract number field label from string and pretty\"\"\"\n try:\n label = nf_string_to_label(lab)\n pretty = field_pretty (label)\n except ValueError as err:\n flash(Markup(\"Error: <span style='color:black'>%s</span> is not a valid number field. %s\" % (lab,err)), \"error\")\n raise ValueError\n return label, pretty\n\n\necnf_credit = \"John Cremona, Alyson Deines, Steve Donelly, Paul Gunnells, Warren Moore, Haluk Sengun, Andrew V Sutherland, John Voight, Dan Yasaki\"\n\n\ndef get_bread(*breads):\n bc = [(\"Elliptic Curves\", url_for(\".index\"))]\n map(bc.append, breads)\n return bc\n\ndef learnmore_list():\n return [('Completeness of the data', url_for(\".completeness_page\")),\n ('Source of the data', url_for(\".how_computed_page\")),\n ('Reliability of the data', url_for(\".reliability_page\")),\n ('Elliptic Curve labels', url_for(\".labels_page\"))]\n\n# Return the learnmore list with the matchstring entry removed\ndef learnmore_list_remove(matchstring):\n return filter(lambda t:t[0].find(matchstring) <0, learnmore_list())\n\n@ecnf_page.route(\"/Completeness\")\ndef completeness_page():\n t = 'Completeness of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Completeness', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.extent',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Completeness'))\n\n\n@ecnf_page.route(\"/Source\")\ndef how_computed_page():\n t = 'Source of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Source', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.source',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Source'))\n\n@ecnf_page.route(\"/Reliability\")\ndef reliability_page():\n t = 'Reliability of the Elliptic Curve Data over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Source', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='dq.ecnf.reliability',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('Reliability'))\n\n@ecnf_page.route(\"/Labels\")\ndef labels_page():\n t = 'Labels for Elliptic Curves over Number Fields'\n bread = [('Elliptic Curves', 
url_for(\"ecnf.index\")),\n ('Labels', '')]\n credit = 'John Cremona'\n return render_template(\"single.html\", kid='ec.curve_label',\n credit=credit, title=t, bread=bread, learnmore=learnmore_list_remove('labels'))\n\n\n@ecnf_page.route(\"/\")\ndef index():\n # if 'jump' in request.args:\n # return show_ecnf1(request.args['label'])\n if len(request.args) > 0:\n return elliptic_curve_search(request.args)\n bread = get_bread()\n\n # the dict data will hold additional information to be displayed on\n # the main browse and search page\n\n data = {}\n\n # data['fields'] holds data for a sample of number fields of different\n # signatures for a general browse:\n\n fields_by_deg = ECNF_stats().fields_by_deg\n fields_by_sig = ECNF_stats().fields_by_sig\n data['fields'] = []\n # Rationals\n data['fields'].append(['the rational field', (('1.1.1.1', [url_for('ec.rational_elliptic_curves'), '$\\Q$']),)])\n\n # Real quadratics (sample)\n rqfs = ['2.2.{}.1'.format(d) for d in [5, 89, 229, 497]]\n niqfs = len(fields_by_sig[0,1])\n nrqfs = len(fields_by_sig[2,0])\n data['fields'].append(['{} real quadratic fields, including'.format(nrqfs),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in rqfs)])\n\n # Imaginary quadratics (sample)\n iqfs = ['2.0.{}.1'.format(d) for d in [4, 8, 3, 7, 11]]\n data['fields'].append(['{} imaginary quadratic fields, including'.format(niqfs),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in iqfs)])\n\n # Cubics (sample)\n cubics = ['3.1.23.1'] + ['3.3.{}.1'.format(d) for d in [49,148,1957]]\n ncubics = len(fields_by_deg[3])\n data['fields'].append(['{} cubic fields, including'.format(ncubics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in cubics)])\n\n # Quartics (sample)\n quartics = ['4.4.{}.1'.format(d) for d in [725,2777,9909,19821]]\n nquartics = len(fields_by_deg[4])\n data['fields'].append(['{} totally real quartic fields, including'.format(nquartics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in quartics)])\n\n # Quintics (sample)\n quintics = ['5.5.{}.1'.format(d) for d in [14641, 24217, 36497, 38569, 65657]]\n nquintics = len(fields_by_deg[5])\n data['fields'].append(['{} totally real quintic fields, including'.format(nquintics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in quintics)])\n\n # Sextics (sample)\n sextics = ['6.6.{}.1'.format(d) for d in [300125, 371293, 434581, 453789, 485125]]\n nsextics = len(fields_by_deg[6])\n data['fields'].append(['{} totally real sextic fields, including'.format(nsextics),\n ((nf, [url_for('.show_ecnf1', nf=nf), field_pretty(nf)])\n for nf in sextics)])\n\n data['degrees'] = sorted([int(d) for d in fields_by_deg.keys() if d!='_id'])\n\n# data['highlights'] holds data (URL and descriptive text) for a\n# sample of elliptic curves with interesting features:\n\n data['highlights'] = []\n data['highlights'].append(\n ['A curve with $C_3\\\\times C_3$ torsion',\n url_for('.show_ecnf', nf='2.0.3.1', class_label='a', conductor_label='2268.36.18', number=int(1))]\n )\n data['highlights'].append(\n ['A curve with $C_4\\\\times C_4$ torsion',\n url_for('.show_ecnf', nf='2.0.4.1', class_label='b', conductor_label='5525.870.5', number=int(9))]\n )\n data['highlights'].append(\n ['A curve with CM by $\\\\sqrt{-267}$',\n url_for('.show_ecnf', nf='2.2.89.1', class_label='a', conductor_label='81.1', number=int(1))]\n )\n data['highlights'].append(\n ['An isogeny class with isogenies of degree $3$ and $89$ (and $267$)',\n 
url_for('.show_ecnf_isoclass', nf='2.2.89.1', class_label='a', conductor_label='81.1')]\n )\n data['highlights'].append(\n ['A curve with everywhere good reduction, but no global minimal model',\n url_for('.show_ecnf', nf='2.2.229.1', class_label='a', conductor_label='1.1', number=int(1))]\n )\n\n return render_template(\"ecnf-index.html\",\n title=\"Elliptic Curves over Number Fields\",\n data=data,\n bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/random\")\ndef random_curve():\n E = db.ec_nfcurves.random(projection=['field_label', 'conductor_label', 'iso_label', 'number'])\n return redirect(url_for(\".show_ecnf\", nf=E['field_label'], conductor_label=E['conductor_label'], class_label=E['iso_label'], number=E['number']), 307)\n\n@ecnf_page.route(\"/<nf>/\")\ndef show_ecnf1(nf):\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n except ValueError:\n return search_input_error()\n if nf_label == '1.1.1.1':\n return redirect(url_for(\"ec.rational_elliptic_curves\", **request.args), 301)\n info = to_dict(request.args)\n info['title'] = 'Elliptic Curves over %s' % nf_pretty\n info['bread'] = [('Elliptic Curves', url_for(\".index\")), (nf_pretty, url_for(\".show_ecnf1\", nf=nf))]\n if len(request.args) > 0:\n # if requested field differs from nf, redirect to general search\n if 'field' in request.args and request.args['field'] != nf_label:\n return redirect (url_for(\".index\", **request.args), 307)\n info['title'] += ' Search Results'\n info['bread'].append(('Search Results',''))\n info['field'] = nf_label\n return elliptic_curve_search(info)\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/\")\ndef show_ecnf_conductor(nf, conductor_label):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n conductor_norm = conductor_label_norm(conductor_label)\n except ValueError:\n return search_input_error()\n info = to_dict(request.args)\n info['title'] = 'Elliptic Curves over %s of Conductor %s' % (nf_pretty, conductor_label)\n info['bread'] = [('Elliptic Curves', url_for(\".index\")), (nf_pretty, url_for(\".show_ecnf1\", nf=nf)), (conductor_label, url_for(\".show_ecnf_conductor\",nf=nf,conductor_label=conductor_label))]\n if len(request.args) > 0:\n # if requested field or conductor norm differs from nf or conductor_lable, redirect to general search\n if ('field' in request.args and request.args['field'] != nf_label) or \\\n ('conductor_norm' in request.args and request.args['conductor_norm'] != conductor_norm):\n return redirect (url_for(\".index\", **request.args), 307)\n info['title'] += ' Search Results'\n info['bread'].append(('Search Results',''))\n info['field'] = nf_label\n info['conductor_label'] = conductor_label\n info['conductor_norm'] = conductor_norm\n return elliptic_curve_search(info)\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/<class_label>/\")\ndef show_ecnf_isoclass(nf, conductor_label, class_label):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label, nf_pretty = get_nf_info(nf)\n except ValueError:\n return search_input_error()\n label = \"-\".join([nf_label, conductor_label, class_label])\n full_class_label = \"-\".join([conductor_label, class_label])\n cl = ECNF_isoclass.by_label(label)\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n if not isinstance(cl, ECNF_isoclass):\n info = {'query':{}, 'err':'No elliptic curve isogeny class in the database has label %s.' 
% label}\n return search_input_error(info, bread)\n title = \"Elliptic Curve Isogeny Class %s over Number Field %s\" % (full_class_label, cl.field_name)\n bread.append((nf_pretty, url_for(\".show_ecnf1\", nf=nf)))\n bread.append((conductor_label, url_for(\".show_ecnf_conductor\", nf=nf_label, conductor_label=conductor_label)))\n bread.append((class_label, url_for(\".show_ecnf_isoclass\", nf=nf_label, conductor_label=quote(conductor_label), class_label=class_label)))\n return render_template(\"ecnf-isoclass.html\",\n credit=ecnf_credit,\n title=title,\n bread=bread,\n cl=cl,\n properties2=cl.properties,\n friends=cl.friends,\n learnmore=learnmore_list())\n\n\n@ecnf_page.route(\"/<nf>/<conductor_label>/<class_label>/<number>\")\ndef show_ecnf(nf, conductor_label, class_label, number):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label = nf_string_to_label(nf)\n except ValueError:\n return search_input_error()\n label = \"\".join([\"-\".join([nf_label, conductor_label, class_label]), number])\n ec = ECNF.by_label(label)\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n if not ec:\n info = {'query':{}}\n info['err'] = 'No elliptic curve in the database has label %s.' % label\n return search_input_error(info, bread)\n\n title = \"Elliptic Curve %s over Number Field %s\" % (ec.short_label, ec.field.field_pretty())\n bread = [(\"Elliptic Curves\", url_for(\".index\"))]\n bread.append((ec.field.field_pretty(), ec.urls['field']))\n bread.append((ec.conductor_label, ec.urls['conductor']))\n bread.append((ec.iso_label, ec.urls['class']))\n bread.append((ec.number, ec.urls['curve']))\n code = ec.code()\n code['show'] = {'magma':'','pari':'','sage':''} # use default show names\n info = {}\n return render_template(\"ecnf-curve.html\",\n credit=ecnf_credit,\n title=title,\n bread=bread,\n ec=ec,\n code = code,\n # properties = ec.properties,\n properties2=ec.properties,\n friends=ec.friends,\n downloads=ec.downloads,\n info=info,\n KNOWL_ID=\"ec.%s\"%label,\n learnmore=learnmore_list())\n\ndef download_search(info):\n dltype = info['Submit']\n delim = 'bracket'\n com = r'\\\\' # single line comment start\n com1 = '' # multiline comment start\n com2 = '' # multiline comment end\n filename = 'elliptic_curves.gp'\n mydate = time.strftime(\"%d %B %Y\")\n if dltype == 'sage':\n com = '#'\n filename = 'elliptic_curves.sage'\n if dltype == 'magma':\n com = ''\n com1 = '/*'\n com2 = '*/'\n delim = 'magma'\n filename = 'elliptic_curves.m'\n s = com1 + \"\\n\"\n s += com + ' Elliptic curves downloaded from the LMFDB downloaded on %s.\\n'%(mydate)\n s += com + ' Below is a list called data. 
Each entry has the form:\\n'\n s += com + ' [[field_poly],[Weierstrass Coefficients, constant first in increasing degree]]\\n'\n s += '\\n' + com2\n s += '\\n'\n\n if dltype == 'magma':\n s += 'P<x> := PolynomialRing(Rationals()); \\n'\n s += 'data := ['\n elif dltype == 'sage':\n s += 'R.<x> = QQ[]; \\n'\n s += 'data = [ '\n else:\n s += 'data = [ '\n s += '\\\\\\n'\n nf_dict = {}\n for f in db.ec_nfcurves.search(ast.literal_eval(info[\"query\"]), ['field_label', 'ainvs']):\n nf = str(f['field_label'])\n # look up number field and see if we already have the min poly\n if nf in nf_dict:\n poly = nf_dict[nf]\n else:\n poly = str(WebNumberField(f['field_label']).poly())\n nf_dict[nf] = poly\n entry = str(f['ainvs'])\n entry = entry.replace('u','')\n entry = entry.replace('\\'','')\n entry = entry.replace(';','],[')\n s += '[[' + poly + '], [[' + entry + ']]],\\\\\\n'\n s = s[:-3]\n s += ']\\n'\n\n if delim == 'brace':\n s = s.replace('[', '{')\n s = s.replace(']', '}')\n if delim == 'magma':\n s = s.replace('[', '[*')\n s = s.replace(']', '*]')\n s += ';'\n strIO = StringIO.StringIO()\n strIO.write(s)\n strIO.seek(0)\n return send_file(strIO,\n attachment_filename=filename,\n as_attachment=True,\n add_etags=False)\n\ndef elliptic_curve_jump(info):\n label = info.get('label', '').replace(\" \", \"\")\n # This label should be a full isogeny class label or a full\n # curve label (including the field_label component)\n try:\n nf, cond_label, iso_label, number = split_full_label(label.strip())\n except ValueError:\n info['err'] = ''\n bread = [('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')]\n return search_input_error(info, bread)\n\n return redirect(url_for(\".show_ecnf\", nf=nf, conductor_label=cond_label, class_label=iso_label, number=number), 301)\n\n@search_wrap(template=\"ecnf-search-results.html\",\n table=db.ec_nfcurves,\n title='Elliptic Curve Search Results',\n err_title='Elliptic Curve Search Input Error',\n shortcuts={'jump':elliptic_curve_jump,\n 'download':download_search},\n cleaners={'numb':lambda e: str(e['number']),\n 'field_knowl':lambda e: nf_display_knowl(e['field_label'], field_pretty(e['field_label']))},\n bread=lambda:[('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')],\n credit=lambda:ecnf_credit)\ndef elliptic_curve_search(info, query):\n parse_nf_string(info,query,'field',name=\"base number field\",qfield='field_label')\n if query.get('field_label') == '1.1.1.1':\n return redirect(url_for(\"ec.rational_elliptic_curves\", **request.args), 301)\n\n parse_ints(info,query,'conductor_norm')\n parse_noop(info,query,'conductor_label')\n parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order')\n parse_bracketed_posints(info,query,'torsion_structure',maxlength=2)\n if 'torsion_structure' in query and not 'torsion_order' in query:\n query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion_structure']],1)\n parse_ints(info,query,field='isodeg',qfield='isogeny_degrees')\n\n if 'jinv' in info:\n if info.get('field','').strip() == '2.2.5.1':\n info['jinv'] = info['jinv'].replace('phi','a')\n if info.get('field','').strip() == '2.0.4.1':\n info['jinv'] = info['jinv'].replace('i','a')\n parse_nf_elt(info,query,'jinv',name='j-invariant')\n if query.get('jinv'):\n query['jinv'] =','.join(query['jinv'])\n\n if 'include_isogenous' in info and info['include_isogenous'] == 'off':\n info['number'] = 1\n query['number'] = 1\n\n if 'include_base_change' in info:\n if info['include_base_change'] == 'off':\n query['base_change'] = 
[]\n if info['include_base_change'] == 'only':\n query['base_change'] = {'$ne':[]}\n else:\n info['include_base_change'] = \"on\"\n\n if 'include_Q_curves' in info:\n if info['include_Q_curves'] == 'exclude':\n query['q_curve'] = False\n elif info['include_Q_curves'] == 'only':\n query['q_curve'] = True\n\n if 'include_cm' in info:\n if info['include_cm'] == 'exclude':\n query['cm'] = 0\n elif info['include_cm'] == 'only':\n query['cm'] = {'$ne' : 0}\n\n info['field_pretty'] = field_pretty\n info['web_ainvs'] = web_ainvs\n\ndef search_input_error(info=None, bread=None):\n if info is None: info = {'err':'','query':{}}\n if bread is None: bread = [('Elliptic Curves', url_for(\".index\")), ('Search Results', '.')]\n return render_template(\"ecnf-search-results.html\", info=info, title='Elliptic Curve Search Input Error', bread=bread)\n\n\n@ecnf_page.route(\"/browse/\")\ndef browse():\n data = ECNF_stats().sigs_by_deg\n # We could use the dict directly but then could not control the order\n # of the keys (degrees), so we use a list\n info = [[d,['%s,%s'%sig for sig in data[d]]] for d in sorted(data.keys())]\n credit = 'John Cremona'\n t = 'Elliptic Curves over Number Fields'\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Browse', ' ')]\n return render_template(\"ecnf-stats.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/browse/<int:d>/\")\ndef statistics_by_degree(d):\n if d==1:\n return redirect(url_for(\"ec.statistics\"))\n info = {}\n\n sigs_by_deg = ECNF_stats().sigs_by_deg\n if d not in sigs_by_deg:\n info['error'] = \"The database does not contain any elliptic curves defined over fields of degree %s\" % d\n else:\n info['degree'] = d\n\n fields_by_sig = ECNF_stats().fields_by_sig\n counts_by_sig = ECNF_stats().sig_normstats\n counts_by_field = ECNF_stats().field_normstats\n\n def field_counts(f):\n return [f,counts_by_field[f]]\n\n def sig_counts(sig):\n return ['%s,%s'%sig, counts_by_sig[sig], [field_counts(f) for f in fields_by_sig[sig]]]\n\n info['summary'] = ECNF_stats().degree_summary(d)\n info['sig_stats'] = [sig_counts(sig) for sig in sigs_by_deg[d]]\n credit = 'John Cremona'\n if d==2:\n t = 'Elliptic Curves over Quadratic Number Fields'\n elif d==3:\n t = 'Elliptic Curves over Cubic Number Fields'\n elif d==4:\n t = 'Elliptic Curves over Quartic Number Fields'\n elif d==5:\n t = 'Elliptic Curves over Quintic Number Fields'\n elif d==6:\n t = 'Elliptic Curves over Sextic Number Fields'\n else:\n t = 'Elliptic Curves over Number Fields of Degree {}'.format(d)\n\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Degree %s' % d,' ')]\n return render_template(\"ecnf-by-degree.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\n@ecnf_page.route(\"/browse/<int:d>/<int:r>/\")\ndef statistics_by_signature(d,r):\n if d==1:\n return redirect(url_for(\"ec.statistics\"))\n\n info = {}\n\n sigs_by_deg = ECNF_stats().sigs_by_deg\n if d not in sigs_by_deg:\n info['error'] = \"The database does not contain any elliptic curves defined over fields of degree %s\" % d\n else:\n info['degree'] = d\n\n if not r in range(d%2,d+1,2):\n info['error'] = \"Invalid signature %s\" % info['sig']\n s = (d-r)//2\n sig = (r,s)\n info['sig'] = '%s,%s' % sig\n info['summary'] = ECNF_stats().signature_summary(sig)\n\n fields_by_sig = ECNF_stats().fields_by_sig\n counts_by_field = ECNF_stats().field_normstats\n\n def field_counts(f):\n return [f,counts_by_field[f]]\n\n info['sig_stats'] = 
[field_counts(f) for f in fields_by_sig[sig]]\n credit = 'John Cremona'\n if info['sig'] == '2,0':\n t = 'Elliptic Curves over Real Quadratic Number Fields'\n elif info['sig'] == '0,1':\n t = 'Elliptic Curves over Imaginary Quadratic Number Fields'\n elif info['sig'] == '3,0':\n t = 'Elliptic Curves over Totally Real Cubic Number fields'\n elif info['sig'] == '1,1':\n t = 'Elliptic Curves over Mixed Cubic Number Fields'\n elif info['sig'] == '4,0':\n t = 'Elliptic Curves over Totally Real Quartic Number Fields'\n elif info['sig'] == '5,0':\n t = 'Elliptic Curves over Totally Real Quintic Number Fields'\n elif info['sig'] == '6,0':\n t = 'Elliptic Curves over Totally Real Sextic Number Fields'\n else:\n t = 'Elliptic Curves over Number Fields of Degree %s, Signature (%s)' % (d,info['sig'])\n bread = [('Elliptic Curves', url_for(\"ecnf.index\")),\n ('Degree %s' % d,url_for(\"ecnf.statistics_by_degree\", d=d)),\n ('Signature (%s)' % info['sig'],' ')]\n return render_template(\"ecnf-by-signature.html\", info=info, credit=credit, title=t, bread=bread, learnmore=learnmore_list())\n\ndef tor_struct_search_nf(prefill=\"any\"):\n def fix(t):\n return t + ' selected = \"yes\"' if prefill==t else t\n def cyc(n):\n return [fix(\"[\"+str(n)+\"]\"), \"C{}\".format(n)]\n def cyc2(m,n):\n return [fix(\"[{},{}]\".format(m,n)), \"C{}×C{}\".format(m,n)]\n gps = [[fix(\"\"), \"any\"], [fix(\"[]\"), \"trivial\"]]\n\n tors = ECNF_stats().torsion_counts\n\n # The following was the set as of 24/4/2017:\n # assert tors == [[2], [2, 2], [2, 4], [2, 6], [2, 8], [2, 10], [2, 12], [2, 14], [2, 16], [2, 18], [3], [3, 3], [3, 6], [4], [4, 4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [25], [27], [37]]\n\n for t in tors:\n if len(t)==1:\n gps.append(cyc(t[0]))\n elif len(t)==2:\n gps.append(cyc2(*t))\n\n return \"\\n\".join([\"<select name='torsion_structure'>\"] + [\"<option value={}>{}</option>\".format(a,b) for a,b in gps] + [\"</select>\"])\n\n# the following allows the preceding function to be used in any template via {{...}}\napp.jinja_env.globals.update(tor_struct_search_nf=tor_struct_search_nf)\n\n@ecnf_page.route(\"/download_all/<nf>/<conductor_label>/<class_label>/<number>\")\ndef download_ECNF_all(nf,conductor_label,class_label,number):\n conductor_label = unquote(conductor_label)\n conductor_label = convert_IQF_label(nf,conductor_label)\n try:\n nf_label = nf_string_to_label(nf)\n except ValueError:\n return search_input_error()\n label = \"\".join([\"-\".join([nf_label, conductor_label, class_label]), number])\n data = db.ec_nfcurves.lookup(label)\n if data is None:\n return search_input_error()\n\n response = make_response(Json.dumps(data))\n response.headers['Content-type'] = 'text/plain'\n return response\n\n@ecnf_page.route('/<nf>/<conductor_label>/<class_label>/<number>/download/<download_type>')\ndef ecnf_code_download(**args):\n response = make_response(ecnf_code(**args))\n response.headers['Content-type'] = 'text/plain'\n return response\n\nsorted_code_names = ['field', 'curve', 'is_min', 'cond', 'cond_norm',\n 'disc', 'disc_norm', 'jinv', 'cm', 'rank', 'ntors',\n 'gens', 'reg', 'tors', 'torgens', 'localdata']\n\ncode_names = {'field': 'Define the base number field',\n 'curve': 'Define the curve',\n 'is_min': 'Test whether it is a global minimal model',\n 'cond': 'Compute the conductor',\n 'cond_norm': 'Compute the norm of the conductor',\n 'disc': 'Compute the discriminant',\n 'disc_norm': 'Compute the norm of the discriminant',\n 'jinv': 
'Compute the j-invariant',\n 'cm': 'Test for Complex Multiplication',\n 'rank': 'Compute the Mordell-Weil rank',\n 'ntors': 'Compute the order of the torsion subgroup',\n 'gens': 'Compute the generators (of infinite order)',\n 'reg': 'Compute the regulator',\n 'tors': 'Compute the torsion subgroup',\n 'torgens': 'Compute the generators of the torsion subgroup',\n 'localdata': 'Compute the local reduction data at primes of bad reduction'\n}\n\nFullname = {'magma': 'Magma', 'sage': 'SageMath', 'gp': 'Pari/GP'}\nComment = {'magma': '//', 'sage': '#', 'gp': '\\\\\\\\', 'pari': '\\\\\\\\'}\n\ndef ecnf_code(**args):\n label = \"\".join([\"-\".join([args['nf'], args['conductor_label'], args['class_label']]), args['number']])\n E = ECNF.by_label(label)\n Ecode = E.code()\n lang = args['download_type']\n code = \"{} {} code for working with elliptic curve {}\\n\\n\".format(Comment[lang],Fullname[lang],label)\n code += \"{} (Note that not all these functions may be available, and some may take a long time to execute.)\\n\".format(Comment[lang])\n if lang=='gp':\n lang = 'pari'\n for k in sorted_code_names:\n if lang in Ecode[k]:\n code += \"\\n{} {}: \\n\".format(Comment[lang],code_names[k])\n code += Ecode[k][lang] + ('\\n' if not '\\n' in Ecode[k][lang] else '')\n return code\n\n",
"path": "lmfdb/ecnf/main.py"
}
] | diff --git a/lmfdb/ecnf/main.py b/lmfdb/ecnf/main.py
index 9b365cbc9b..5dc161bd3f 100644
--- a/lmfdb/ecnf/main.py
+++ b/lmfdb/ecnf/main.py
@@ -378,7 +378,7 @@ def show_ecnf(nf, conductor_label, class_label, number):
learnmore=learnmore_list())
def download_search(info):
- dltype = info['submit']
+ dltype = info['Submit']
delim = 'bracket'
com = r'\\' # single line comment start
com1 = '' # multiline comment start
|
liqd__a4-meinberlin-488 | Login with username
It is currently not possible to log in with a username, only with an email address. This *was* possible with the old meinberlin that was based on a3. So the login flow of established users breaks even though we migrated all accounts.
| [
{
"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.embed.apps.Config',\n 'apps.moderatorfeedback.apps.Config',\n 'apps.maps.apps.Config',\n 'apps.notifications.apps.Config',\n\n 'apps.account.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.bplan.apps.Config',\n 'apps.budgeting.apps.Config',\n 'apps.documents.apps.Config',\n 'apps.extprojects.apps.Config',\n 'apps.ideas.apps.Config',\n 'apps.kiezkasse.apps.Config',\n 'apps.mapideas.apps.Config',\n 'apps.polls.apps.Config',\n 'apps.topicprio.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n\n 'apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 
'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. 
Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'apps.users.forms.TermsSignupForm'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher', # a3\n 'apps.users.hashers.A2PasswordHasher',\n]\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'document'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_polls', 'poll'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\nA4_AUTO_FOLLOWABLES = (('a4comments', 'comment'),)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n ('meinberlin_polls', 'vote'), # TODO: 
really?\n)\n\nA4_MAP_BASEURL = 'https://maps.berlinonline.de/tile/bright/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n",
"path": "meinberlin/settings/base.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.embed.apps.Config',\n 'apps.moderatorfeedback.apps.Config',\n 'apps.maps.apps.Config',\n 'apps.notifications.apps.Config',\n\n 'apps.account.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.bplan.apps.Config',\n 'apps.budgeting.apps.Config',\n 'apps.documents.apps.Config',\n 'apps.extprojects.apps.Config',\n 'apps.ideas.apps.Config',\n 'apps.kiezkasse.apps.Config',\n 'apps.mapideas.apps.Config',\n 'apps.polls.apps.Config',\n 'apps.topicprio.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n\n 'apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 
'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. 
Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'apps.users.forms.TermsSignupForm'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher', # a3\n 'apps.users.hashers.A2PasswordHasher',\n]\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'document'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_polls', 'poll'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\nA4_AUTO_FOLLOWABLES = (('a4comments', 'comment'),)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n ('meinberlin_polls', 'vote'), # TODO: 
really?\n)\n\nA4_MAP_BASEURL = 'https://maps.berlinonline.de/tile/bright/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n",
"path": "meinberlin/settings/base.py"
}
] | diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py
index e5c634d93e..8be4c29c06 100644
--- a/meinberlin/settings/base.py
+++ b/meinberlin/settings/base.py
@@ -218,7 +218,7 @@
)
ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'
-ACCOUNT_AUTHENTICATION_METHOD = 'email'
+ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
|
pyg-team__pytorch_geometric-8179 | Dataset is not undirected
### 🐛 Describe the bug
The dataset is not undirected, despite passing the ``to_undirected=True`` flag.
```python
# !pip install pyg-nightly
from torch_geometric.datasets import CitationFull
from torch_geometric.utils import is_undirected
edge_index = CitationFull(root=".", name="Cora_ML", to_undirected=True).edge_index
is_undirected(edge_index)
```
The above outputs: *False*
### Environment
* PyG version: 2.4.0.dev20231010
* PyTorch version: 2.0.1+cu118
* OS: Colab
* Python version: 3.10.12
* CUDA/cuDNN version: 11.8
* How you installed PyTorch and PyG (`conda`, `pip`, source): pip
* Any other relevant information (*e.g.*, version of `torch-scatter`):
| [
{
"content": "import os.path as osp\nfrom typing import Callable, Optional\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom torch_geometric.io import read_npz\n\n\nclass CitationFull(InMemoryDataset):\n r\"\"\"The full citation network datasets from the\n `\"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via\n Ranking\" <https://arxiv.org/abs/1707.03815>`_ paper.\n Nodes represent documents and edges represent citation links.\n Datasets include :obj:`\"Cora\"`, :obj:`\"Cora_ML\"`, :obj:`\"CiteSeer\"`,\n :obj:`\"DBLP\"`, :obj:`\"PubMed\"`.\n\n Args:\n root (str): Root directory where the dataset should be saved.\n name (str): The name of the dataset (:obj:`\"Cora\"`, :obj:`\"Cora_ML\"`\n :obj:`\"CiteSeer\"`, :obj:`\"DBLP\"`, :obj:`\"PubMed\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n to_undirected (bool, optional): Whether the original graph is\n converted to an undirected one. (default: :obj:`True`)\n\n **STATS:**\n\n .. list-table::\n :widths: 10 10 10 10 10\n :header-rows: 1\n\n * - Name\n - #nodes\n - #edges\n - #features\n - #classes\n * - Cora\n - 19,793\n - 126,842\n - 8,710\n - 70\n * - Cora_ML\n - 2,995\n - 16,316\n - 2,879\n - 7\n * - CiteSeer\n - 4,230\n - 10,674\n - 602\n - 6\n * - DBLP\n - 17,716\n - 105,734\n - 1,639\n - 4\n * - PubMed\n - 19,717\n - 88,648\n - 500\n - 3\n \"\"\"\n\n url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/{}.npz'\n\n def __init__(\n self,\n root: str,\n name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n to_undirected: bool = True,\n ):\n self.name = name.lower()\n self.to_undirected = to_undirected\n assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self) -> str:\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self) -> str:\n return f'{self.name}.npz'\n\n @property\n def processed_file_names(self) -> str:\n return 'data.pt'\n\n def download(self):\n download_url(self.url.format(self.name), self.raw_dir)\n\n def process(self):\n data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected)\n data = data if self.pre_transform is None else self.pre_transform(data)\n data, slices = self.collate([data])\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name.capitalize()}Full()'\n\n\nclass CoraFull(CitationFull):\n r\"\"\"Alias for :class:`~torch_geometric.datasets.CitationFull` with\n :obj:`name=\"Cora\"`.\n\n **STATS:**\n\n .. 
list-table::\n :widths: 10 10 10 10\n :header-rows: 1\n\n * - #nodes\n - #edges\n - #features\n - #classes\n * - 19,793\n - 126,842\n - 8,710\n - 70\n \"\"\"\n def __init__(self, root: str, transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n super().__init__(root, 'cora', transform, pre_transform)\n\n def download(self):\n super().download()\n\n def process(self):\n super().process()\n",
"path": "torch_geometric/datasets/citation_full.py"
}
] | [
{
"content": "import os.path as osp\nfrom typing import Callable, Optional\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom torch_geometric.io import read_npz\n\n\nclass CitationFull(InMemoryDataset):\n r\"\"\"The full citation network datasets from the\n `\"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via\n Ranking\" <https://arxiv.org/abs/1707.03815>`_ paper.\n Nodes represent documents and edges represent citation links.\n Datasets include :obj:`\"Cora\"`, :obj:`\"Cora_ML\"`, :obj:`\"CiteSeer\"`,\n :obj:`\"DBLP\"`, :obj:`\"PubMed\"`.\n\n Args:\n root (str): Root directory where the dataset should be saved.\n name (str): The name of the dataset (:obj:`\"Cora\"`, :obj:`\"Cora_ML\"`\n :obj:`\"CiteSeer\"`, :obj:`\"DBLP\"`, :obj:`\"PubMed\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n to_undirected (bool, optional): Whether the original graph is\n converted to an undirected one. (default: :obj:`True`)\n\n **STATS:**\n\n .. list-table::\n :widths: 10 10 10 10 10\n :header-rows: 1\n\n * - Name\n - #nodes\n - #edges\n - #features\n - #classes\n * - Cora\n - 19,793\n - 126,842\n - 8,710\n - 70\n * - Cora_ML\n - 2,995\n - 16,316\n - 2,879\n - 7\n * - CiteSeer\n - 4,230\n - 10,674\n - 602\n - 6\n * - DBLP\n - 17,716\n - 105,734\n - 1,639\n - 4\n * - PubMed\n - 19,717\n - 88,648\n - 500\n - 3\n \"\"\"\n\n url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/{}.npz'\n\n def __init__(\n self,\n root: str,\n name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n to_undirected: bool = True,\n ):\n self.name = name.lower()\n self.to_undirected = to_undirected\n assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self) -> str:\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self) -> str:\n return f'{self.name}.npz'\n\n @property\n def processed_file_names(self) -> str:\n suffix = 'undirected' if self.to_undirected else 'directed'\n return f'data_{suffix}.pt'\n\n def download(self):\n download_url(self.url.format(self.name), self.raw_dir)\n\n def process(self):\n data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected)\n data = data if self.pre_transform is None else self.pre_transform(data)\n data, slices = self.collate([data])\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name.capitalize()}Full()'\n\n\nclass CoraFull(CitationFull):\n r\"\"\"Alias for :class:`~torch_geometric.datasets.CitationFull` with\n :obj:`name=\"Cora\"`.\n\n **STATS:**\n\n .. 
list-table::\n :widths: 10 10 10 10\n :header-rows: 1\n\n * - #nodes\n - #edges\n - #features\n - #classes\n * - 19,793\n - 126,842\n - 8,710\n - 70\n \"\"\"\n def __init__(self, root: str, transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n super().__init__(root, 'cora', transform, pre_transform)\n\n def download(self):\n super().download()\n\n def process(self):\n super().process()\n",
"path": "torch_geometric/datasets/citation_full.py"
}
] | diff --git a/torch_geometric/datasets/citation_full.py b/torch_geometric/datasets/citation_full.py
index 70c2d6f51b76..a1a305ce064b 100644
--- a/torch_geometric/datasets/citation_full.py
+++ b/torch_geometric/datasets/citation_full.py
@@ -98,7 +98,8 @@ def raw_file_names(self) -> str:
@property
def processed_file_names(self) -> str:
- return 'data.pt'
+ suffix = 'undirected' if self.to_undirected else 'directed'
+ return f'data_{suffix}.pt'
def download(self):
download_url(self.url.format(self.name), self.raw_dir)
|
getsentry__sentry-5094 | Webhook data does not have event id
Webhook data contains the issue id only. It would be nice to have the event id as well.
Discussed with @mattrobenolt on IRC. Documenting it here with this issue.
| [
{
"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n",
"path": "src/sentry/plugins/sentry_webhooks/plugin.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n data['event']['event_id'] = event.event_id\n data['event']['id'] = event.id\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n",
"path": "src/sentry/plugins/sentry_webhooks/plugin.py"
}
] | diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py
index 3bfac698fc4bf4..f25c7546a3a559 100644
--- a/src/sentry/plugins/sentry_webhooks/plugin.py
+++ b/src/sentry/plugins/sentry_webhooks/plugin.py
@@ -87,6 +87,8 @@ def get_group_data(self, group, event):
}
data['event'] = dict(event.data or {})
data['event']['tags'] = event.get_tags()
+ data['event']['event_id'] = event.event_id
+ data['event']['id'] = event.id
return data
def get_webhook_urls(self, project):
diff --git a/tests/sentry/plugins/sentry_webhooks/test_plugin.py b/tests/sentry/plugins/sentry_webhooks/test_plugin.py
index ab49c75257a14f..a2e7f05226f948 100644
--- a/tests/sentry/plugins/sentry_webhooks/test_plugin.py
+++ b/tests/sentry/plugins/sentry_webhooks/test_plugin.py
@@ -23,7 +23,7 @@ def test_simple_notification(self):
responses.add(responses.POST, 'http://example.com')
group = self.create_group(message='Hello world')
- event = self.create_event(group=group, message='Hello world', tags={'level': 'warning'})
+ event = self.create_event(group=group, message='Hello world', tags={'level': 'warning'}, id=24)
rule = Rule.objects.create(project=self.project, label='my rule')
@@ -39,3 +39,5 @@ def test_simple_notification(self):
assert payload['level'] == 'warning'
assert payload['message'] == 'Hello world'
+ assert payload['event']['id'] == 24
+ assert payload['event']['event_id'] == event.event_id
|
joke2k__faker-435 | Published packages include docs/ as a module
The published wheel and sdist on PyPI for at least version 0.7.5 include `docs/__init__.py` as a top-level module in addition to `faker`. This conflicts with some other packages we use (PyICU) and seems like bad package hygiene, especially since the `docs` dir in this repository is definitely not a module. My guess is that a `__init__.py` made it in there on the maintainer's machine before running `setup.py` and it was erroneously discovered as a module.
We're going to republish the package to our own internal repository, but I think it would help the community to `git clean` as necessary and re-publish a new version, and consider adding necessary exclusions to the `setup.py` or `MANIFEST.in`.
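For illustration, the patch shown in the diff below takes the `setup.py` route; a minimal sketch of the relevant fragment (all other arguments left as they appear in the real file) would be:
```python
# Sketch of the setup.py change: exclude the docs directory from package
# discovery so a stray docs/__init__.py can never be picked up and published
# as a top-level module alongside faker.
from setuptools import setup, find_packages

setup(
    name='Faker',
    # ... remaining metadata unchanged ...
    packages=find_packages(exclude=("docs",)),
)
```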
| [
{
"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.7.5'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six\",\n ],\n extras_require={\n ':python_version==\"2.7\"': [\n 'ipaddress',\n ],\n ':python_version==\"3.0\"': [\n 'importlib',\n ],\n ':python_version==\"3.2\"': [\n 'ipaddress',\n ],\n }\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.7.5'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=(\"docs\",)),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six\",\n ],\n extras_require={\n ':python_version==\"2.7\"': [\n 'ipaddress',\n ],\n ':python_version==\"3.0\"': [\n 'importlib',\n ],\n ':python_version==\"3.2\"': [\n 'ipaddress',\n ],\n }\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index a4582f7970..7584c0187b 100644
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
author_email='[email protected]',
url='https://github.com/joke2k/faker',
license='MIT License',
- packages=find_packages(),
+ packages=find_packages(exclude=("docs",)),
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
|
aio-libs__aiohttp-1854 | Sub app middlewares are called before main app
## Long story short
According to http://aiohttp.readthedocs.io/en/stable/web.html#nested-applications,
`[I]f URL is '/admin/something' middlewares from [main application] are applied first and [sub application] middlewares are the next in the call chain.`
It turns out it's actually the other way.
## Steps to reproduce
Run this code saved as foo.py:
```python
#!/usr/bin/python3 -tt
import aiohttp
import aiohttp.web
async def middleware1(app, next_handler):
async def handler(request):
print("middleware1")
return await next_handler(request)
return handler
async def middleware2(app, next_handler):
async def handler(request):
print("middleware2")
return await next_handler(request)
return handler
async def middleware3(app, next_handler):
async def handler(request):
print("middleware3")
return await next_handler(request)
return handler
app = aiohttp.web.Application(
middlewares=[middleware1])
subapp1 = aiohttp.web.Application(middlewares=[middleware2])
app.add_subapp("/foo", subapp1)
subapp2 = aiohttp.web.Application(middlewares=[middleware3])
subapp1.add_subapp("/foo/bar", subapp2)
aiohttp.web.run_app(app, host='127.0.0.1', port=4096)
```
And then run `wget http://127.0.0.1:4096/foo/bar`.
### Current Result:
```
$ python3 foo.py
======== Running on http://127.0.0.1:4096 ========
(Press CTRL+C to quit)
middleware3
middleware2
middleware1
```
### Expected output:
```
$ python3 foo.py
======== Running on http://127.0.0.1:4096 ========
(Press CTRL+C to quit)
middleware1
middleware2
middleware3
```
## Your environment
Ubuntu Xenial (python 3.5.2) with aiohttp 2.0.7
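For reference, the fix in the diff below simply walks the matched applications in reverse (`match_info.apps[::-1]`) when wrapping the handler. The following self-contained sketch uses plain illustrative functions, not aiohttp's actual middleware machinery, to show why that restores the documented order: wrapping happens inside-out, so the last middleware applied ends up outermost and is called first.
```python
# Minimal illustration of handler wrapping order (not aiohttp code).
def make_middleware(name):
    def factory(next_handler):
        def handler(request):
            print(name)
            return next_handler(request)
        return handler
    return factory

def build_chain(handler, apps):
    # apps is ordered [main, subapp1, subapp2]; iterating it reversed wraps
    # the main app's middleware last, so it becomes the outermost wrapper
    # and therefore runs first -- matching the documented behaviour.
    for middlewares in reversed(apps):
        for factory in middlewares:
            handler = factory(handler)
    return handler

chain = build_chain(
    lambda request: 'ok',
    [[make_middleware('middleware1')],   # main app
     [make_middleware('middleware2')],   # subapp1
     [make_middleware('middleware3')]],  # subapp2
)
chain(None)  # prints middleware1, middleware2, middleware3
```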
| [
{
"content": "import asyncio\nimport os\nimport socket\nimport stat\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom collections import Iterable, MutableMapping\nfrom importlib import import_module\n\nfrom yarl import URL\n\nfrom . import (hdrs, web_exceptions, web_fileresponse, web_middlewares,\n web_protocol, web_request, web_response, web_server,\n web_urldispatcher, web_ws)\nfrom .abc import AbstractMatchInfo, AbstractRouter\nfrom .helpers import FrozenList\nfrom .http import HttpVersion # noqa\nfrom .log import access_logger, web_logger\nfrom .signals import FuncSignal, PostSignal, PreSignal, Signal\nfrom .web_exceptions import * # noqa\nfrom .web_fileresponse import * # noqa\nfrom .web_middlewares import * # noqa\nfrom .web_protocol import * # noqa\nfrom .web_request import * # noqa\nfrom .web_response import * # noqa\nfrom .web_server import Server\nfrom .web_urldispatcher import * # noqa\nfrom .web_urldispatcher import PrefixedSubAppResource\nfrom .web_ws import * # noqa\n\n__all__ = (web_protocol.__all__ +\n web_fileresponse.__all__ +\n web_request.__all__ +\n web_response.__all__ +\n web_exceptions.__all__ +\n web_urldispatcher.__all__ +\n web_ws.__all__ +\n web_server.__all__ +\n web_middlewares.__all__ +\n ('Application', 'HttpVersion', 'MsgType'))\n\n\nclass Application(MutableMapping):\n def __init__(self, *,\n logger=web_logger,\n router=None,\n middlewares=(),\n handler_args=None,\n client_max_size=1024**2,\n secure_proxy_ssl_header=None,\n loop=None,\n debug=...):\n if router is None:\n router = web_urldispatcher.UrlDispatcher()\n assert isinstance(router, AbstractRouter), router\n\n if loop is not None:\n warnings.warn(\"loop argument is deprecated\", ResourceWarning)\n\n self._debug = debug\n self._router = router\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n self._loop = loop\n self._handler_args = handler_args\n self.logger = logger\n\n self._middlewares = FrozenList(middlewares)\n self._state = {}\n self._frozen = False\n self._subapps = []\n\n self._on_pre_signal = PreSignal()\n self._on_post_signal = PostSignal()\n self._on_loop_available = FuncSignal(self)\n self._on_response_prepare = Signal(self)\n self._on_startup = Signal(self)\n self._on_shutdown = Signal(self)\n self._on_cleanup = Signal(self)\n self._client_max_size = client_max_size\n\n # MutableMapping API\n\n def __getitem__(self, key):\n return self._state[key]\n\n def _check_frozen(self):\n if self._frozen:\n warnings.warn(\"Changing state of started or joined \"\n \"application is deprecated\",\n DeprecationWarning,\n stacklevel=3)\n\n def __setitem__(self, key, value):\n self._check_frozen()\n self._state[key] = value\n\n def __delitem__(self, key):\n self._check_frozen()\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n ########\n @property\n def loop(self):\n return self._loop\n\n def _set_loop(self, loop):\n if loop is None:\n loop = asyncio.get_event_loop()\n if self._loop is not None and self._loop is not loop:\n raise RuntimeError(\n \"web.Application instance initialized with different loop\")\n\n self._loop = loop\n self._on_loop_available.send(self)\n\n # set loop debug\n if self._debug is ...:\n self._debug = loop.get_debug()\n\n # set loop to sub applications\n for subapp in self._subapps:\n subapp._set_loop(loop)\n\n @property\n def frozen(self):\n return self._frozen\n\n def freeze(self):\n if self._frozen:\n return\n\n self._frozen = True\n self._middlewares = 
tuple(reversed(self._middlewares))\n self._router.freeze()\n self._on_loop_available.freeze()\n self._on_pre_signal.freeze()\n self._on_post_signal.freeze()\n self._on_response_prepare.freeze()\n self._on_startup.freeze()\n self._on_shutdown.freeze()\n self._on_cleanup.freeze()\n\n for subapp in self._subapps:\n subapp.freeze()\n\n @property\n def debug(self):\n return self._debug\n\n def _reg_subapp_signals(self, subapp):\n\n def reg_handler(signame):\n subsig = getattr(subapp, signame)\n\n @asyncio.coroutine\n def handler(app):\n yield from subsig.send(subapp)\n appsig = getattr(self, signame)\n appsig.append(handler)\n\n reg_handler('on_startup')\n reg_handler('on_shutdown')\n reg_handler('on_cleanup')\n\n def add_subapp(self, prefix, subapp):\n if self.frozen:\n raise RuntimeError(\n \"Cannot add sub application to frozen application\")\n if subapp.frozen:\n raise RuntimeError(\"Cannot add frozen application\")\n if prefix.endswith('/'):\n prefix = prefix[:-1]\n if prefix in ('', '/'):\n raise ValueError(\"Prefix cannot be empty\")\n\n resource = PrefixedSubAppResource(prefix, subapp)\n self.router.register_resource(resource)\n self._reg_subapp_signals(subapp)\n self._subapps.append(subapp)\n if self._loop is not None:\n subapp._set_loop(self._loop)\n return resource\n\n @property\n def on_loop_available(self):\n return self._on_loop_available\n\n @property\n def on_response_prepare(self):\n return self._on_response_prepare\n\n @property\n def on_pre_signal(self):\n return self._on_pre_signal\n\n @property\n def on_post_signal(self):\n return self._on_post_signal\n\n @property\n def on_startup(self):\n return self._on_startup\n\n @property\n def on_shutdown(self):\n return self._on_shutdown\n\n @property\n def on_cleanup(self):\n return self._on_cleanup\n\n @property\n def router(self):\n return self._router\n\n @property\n def middlewares(self):\n return self._middlewares\n\n def make_handler(self, *, loop=None,\n secure_proxy_ssl_header=None, **kwargs):\n self._set_loop(loop)\n self.freeze()\n\n kwargs['debug'] = self.debug\n if self._handler_args:\n for k, v in self._handler_args.items():\n kwargs[k] = v\n\n if secure_proxy_ssl_header:\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n return Server(self._handle, request_factory=self._make_request,\n loop=self.loop, **kwargs)\n\n @asyncio.coroutine\n def startup(self):\n \"\"\"Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n \"\"\"\n yield from self.on_startup.send(self)\n\n @asyncio.coroutine\n def shutdown(self):\n \"\"\"Causes on_shutdown signal\n\n Should be called before cleanup()\n \"\"\"\n yield from self.on_shutdown.send(self)\n\n @asyncio.coroutine\n def cleanup(self):\n \"\"\"Causes on_cleanup signal\n\n Should be called after shutdown()\n \"\"\"\n yield from self.on_cleanup.send(self)\n\n def _make_request(self, message, payload, protocol, writer, task,\n _cls=web_request.Request):\n return _cls(\n message, payload, protocol, writer, protocol._time_service, task,\n secure_proxy_ssl_header=self._secure_proxy_ssl_header,\n client_max_size=self._client_max_size)\n\n @asyncio.coroutine\n def _handle(self, request):\n match_info = yield from self._router.resolve(request)\n assert isinstance(match_info, AbstractMatchInfo), match_info\n match_info.add_app(self)\n\n if __debug__:\n match_info.freeze()\n\n resp = None\n request._match_info = match_info\n expect = request.headers.get(hdrs.EXPECT)\n if expect:\n resp = yield from match_info.expect_handler(request)\n yield 
from request.writer.drain()\n\n if resp is None:\n handler = match_info.handler\n for app in match_info.apps:\n for factory in app._middlewares:\n handler = yield from factory(app, handler)\n\n resp = yield from handler(request)\n\n assert isinstance(resp, web_response.StreamResponse), \\\n (\"Handler {!r} should return response instance, \"\n \"got {!r} [middlewares {!r}]\").format(\n match_info.handler, type(resp),\n [middleware for middleware in app.middlewares\n for app in match_info.apps])\n return resp\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n def __repr__(self):\n return \"<Application 0x{:x}>\".format(id(self))\n\n\ndef run_app(app, *, host=None, port=None, path=None, sock=None,\n shutdown_timeout=60.0, ssl_context=None,\n print=print, backlog=128, access_log_format=None,\n access_log=access_logger, loop=None):\n \"\"\"Run an app locally\"\"\"\n if loop is None:\n loop = asyncio.get_event_loop()\n\n make_handler_kwargs = dict()\n if access_log_format is not None:\n make_handler_kwargs['access_log_format'] = access_log_format\n handler = app.make_handler(loop=loop, access_log=access_log,\n **make_handler_kwargs)\n\n loop.run_until_complete(app.startup())\n\n scheme = 'https' if ssl_context else 'http'\n base_url = URL('{}://localhost'.format(scheme)).with_port(port)\n\n if path is None:\n paths = ()\n elif isinstance(path, (str, bytes, bytearray, memoryview))\\\n or not isinstance(path, Iterable):\n paths = (path,)\n else:\n paths = path\n\n if sock is None:\n socks = ()\n elif not isinstance(sock, Iterable):\n socks = (sock,)\n else:\n socks = sock\n\n if host is None:\n if (paths or socks) and not port:\n hosts = ()\n else:\n hosts = (\"0.0.0.0\",)\n elif isinstance(host, (str, bytes, bytearray, memoryview))\\\n or not isinstance(host, Iterable):\n hosts = (host,)\n else:\n hosts = host\n\n if hosts and port is None:\n port = 8443 if ssl_context else 8080\n\n server_creations = []\n uris = [str(base_url.with_host(host)) for host in hosts]\n if hosts:\n # Multiple hosts bound to same server is available in most loop\n # implementations, but only send multiple if we have multiple.\n host_binding = hosts[0] if len(hosts) == 1 else hosts\n server_creations.append(\n loop.create_server(\n handler, host_binding, port, ssl=ssl_context, backlog=backlog\n )\n )\n for path in paths:\n # Most loop implementations don't support multiple paths bound in same\n # server, so create a server for each.\n server_creations.append(\n loop.create_unix_server(\n handler, path, ssl=ssl_context, backlog=backlog\n )\n )\n uris.append('{}://unix:{}:'.format(scheme, path))\n\n # Clean up prior socket path if stale and not abstract.\n # CPython 3.5.3+'s event loop already does this. 
See\n # https://github.com/python/asyncio/issues/425\n if path[0] not in (0, '\\x00'): # pragma: no branch\n try:\n if stat.S_ISSOCK(os.stat(path).st_mode):\n os.remove(path)\n except FileNotFoundError:\n pass\n for sock in socks:\n server_creations.append(\n loop.create_server(\n handler, sock=sock, ssl=ssl_context, backlog=backlog\n )\n )\n\n if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:\n uris.append('{}://unix:{}:'.format(scheme, sock.getsockname()))\n else:\n host, port = sock.getsockname()\n uris.append(str(base_url.with_host(host).with_port(port)))\n\n servers = loop.run_until_complete(\n asyncio.gather(*server_creations, loop=loop)\n )\n\n print(\"======== Running on {} ========\\n\"\n \"(Press CTRL+C to quit)\".format(', '.join(uris)))\n\n try:\n loop.run_forever()\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n server_closures = []\n for srv in servers:\n srv.close()\n server_closures.append(srv.wait_closed())\n loop.run_until_complete(asyncio.gather(*server_closures, loop=loop))\n loop.run_until_complete(app.shutdown())\n loop.run_until_complete(handler.shutdown(shutdown_timeout))\n loop.run_until_complete(app.cleanup())\n loop.close()\n\n\ndef main(argv):\n arg_parser = ArgumentParser(\n description=\"aiohttp.web Application server\",\n prog=\"aiohttp.web\"\n )\n arg_parser.add_argument(\n \"entry_func\",\n help=(\"Callable returning the `aiohttp.web.Application` instance to \"\n \"run. Should be specified in the 'module:function' syntax.\"),\n metavar=\"entry-func\"\n )\n arg_parser.add_argument(\n \"-H\", \"--hostname\",\n help=\"TCP/IP hostname to serve on (default: %(default)r)\",\n default=\"localhost\"\n )\n arg_parser.add_argument(\n \"-P\", \"--port\",\n help=\"TCP/IP port to serve on (default: %(default)r)\",\n type=int,\n default=\"8080\"\n )\n arg_parser.add_argument(\n \"-U\", \"--path\",\n help=\"Unix file system path to serve on. Specifying a path will cause \"\n \"hostname and port arguments to be ignored.\",\n )\n args, extra_argv = arg_parser.parse_known_args(argv)\n\n # Import logic\n mod_str, _, func_str = args.entry_func.partition(\":\")\n if not func_str or not mod_str:\n arg_parser.error(\n \"'entry-func' not in 'module:function' syntax\"\n )\n if mod_str.startswith(\".\"):\n arg_parser.error(\"relative module names not supported\")\n try:\n module = import_module(mod_str)\n except ImportError as ex:\n arg_parser.error(\"unable to import %s: %s\" % (mod_str, ex))\n try:\n func = getattr(module, func_str)\n except AttributeError:\n arg_parser.error(\"module %r has no attribute %r\" % (mod_str, func_str))\n\n # Compatibility logic\n if args.path is not None and not hasattr(socket, 'AF_UNIX'):\n arg_parser.error(\"file system paths not supported by your operating\"\n \" environment\")\n\n app = func(extra_argv)\n run_app(app, host=args.hostname, port=args.port, path=args.path)\n arg_parser.exit(message=\"Stopped\\n\")\n\n\nif __name__ == \"__main__\": # pragma: no branch\n main(sys.argv[1:]) # pragma: no cover\n",
"path": "aiohttp/web.py"
}
] | [
{
"content": "import asyncio\nimport os\nimport socket\nimport stat\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom collections import Iterable, MutableMapping\nfrom importlib import import_module\n\nfrom yarl import URL\n\nfrom . import (hdrs, web_exceptions, web_fileresponse, web_middlewares,\n web_protocol, web_request, web_response, web_server,\n web_urldispatcher, web_ws)\nfrom .abc import AbstractMatchInfo, AbstractRouter\nfrom .helpers import FrozenList\nfrom .http import HttpVersion # noqa\nfrom .log import access_logger, web_logger\nfrom .signals import FuncSignal, PostSignal, PreSignal, Signal\nfrom .web_exceptions import * # noqa\nfrom .web_fileresponse import * # noqa\nfrom .web_middlewares import * # noqa\nfrom .web_protocol import * # noqa\nfrom .web_request import * # noqa\nfrom .web_response import * # noqa\nfrom .web_server import Server\nfrom .web_urldispatcher import * # noqa\nfrom .web_urldispatcher import PrefixedSubAppResource\nfrom .web_ws import * # noqa\n\n__all__ = (web_protocol.__all__ +\n web_fileresponse.__all__ +\n web_request.__all__ +\n web_response.__all__ +\n web_exceptions.__all__ +\n web_urldispatcher.__all__ +\n web_ws.__all__ +\n web_server.__all__ +\n web_middlewares.__all__ +\n ('Application', 'HttpVersion', 'MsgType'))\n\n\nclass Application(MutableMapping):\n def __init__(self, *,\n logger=web_logger,\n router=None,\n middlewares=(),\n handler_args=None,\n client_max_size=1024**2,\n secure_proxy_ssl_header=None,\n loop=None,\n debug=...):\n if router is None:\n router = web_urldispatcher.UrlDispatcher()\n assert isinstance(router, AbstractRouter), router\n\n if loop is not None:\n warnings.warn(\"loop argument is deprecated\", ResourceWarning)\n\n self._debug = debug\n self._router = router\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n self._loop = loop\n self._handler_args = handler_args\n self.logger = logger\n\n self._middlewares = FrozenList(middlewares)\n self._state = {}\n self._frozen = False\n self._subapps = []\n\n self._on_pre_signal = PreSignal()\n self._on_post_signal = PostSignal()\n self._on_loop_available = FuncSignal(self)\n self._on_response_prepare = Signal(self)\n self._on_startup = Signal(self)\n self._on_shutdown = Signal(self)\n self._on_cleanup = Signal(self)\n self._client_max_size = client_max_size\n\n # MutableMapping API\n\n def __getitem__(self, key):\n return self._state[key]\n\n def _check_frozen(self):\n if self._frozen:\n warnings.warn(\"Changing state of started or joined \"\n \"application is deprecated\",\n DeprecationWarning,\n stacklevel=3)\n\n def __setitem__(self, key, value):\n self._check_frozen()\n self._state[key] = value\n\n def __delitem__(self, key):\n self._check_frozen()\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n ########\n @property\n def loop(self):\n return self._loop\n\n def _set_loop(self, loop):\n if loop is None:\n loop = asyncio.get_event_loop()\n if self._loop is not None and self._loop is not loop:\n raise RuntimeError(\n \"web.Application instance initialized with different loop\")\n\n self._loop = loop\n self._on_loop_available.send(self)\n\n # set loop debug\n if self._debug is ...:\n self._debug = loop.get_debug()\n\n # set loop to sub applications\n for subapp in self._subapps:\n subapp._set_loop(loop)\n\n @property\n def frozen(self):\n return self._frozen\n\n def freeze(self):\n if self._frozen:\n return\n\n self._frozen = True\n self._middlewares = 
tuple(reversed(self._middlewares))\n self._router.freeze()\n self._on_loop_available.freeze()\n self._on_pre_signal.freeze()\n self._on_post_signal.freeze()\n self._on_response_prepare.freeze()\n self._on_startup.freeze()\n self._on_shutdown.freeze()\n self._on_cleanup.freeze()\n\n for subapp in self._subapps:\n subapp.freeze()\n\n @property\n def debug(self):\n return self._debug\n\n def _reg_subapp_signals(self, subapp):\n\n def reg_handler(signame):\n subsig = getattr(subapp, signame)\n\n @asyncio.coroutine\n def handler(app):\n yield from subsig.send(subapp)\n appsig = getattr(self, signame)\n appsig.append(handler)\n\n reg_handler('on_startup')\n reg_handler('on_shutdown')\n reg_handler('on_cleanup')\n\n def add_subapp(self, prefix, subapp):\n if self.frozen:\n raise RuntimeError(\n \"Cannot add sub application to frozen application\")\n if subapp.frozen:\n raise RuntimeError(\"Cannot add frozen application\")\n if prefix.endswith('/'):\n prefix = prefix[:-1]\n if prefix in ('', '/'):\n raise ValueError(\"Prefix cannot be empty\")\n\n resource = PrefixedSubAppResource(prefix, subapp)\n self.router.register_resource(resource)\n self._reg_subapp_signals(subapp)\n self._subapps.append(subapp)\n if self._loop is not None:\n subapp._set_loop(self._loop)\n return resource\n\n @property\n def on_loop_available(self):\n return self._on_loop_available\n\n @property\n def on_response_prepare(self):\n return self._on_response_prepare\n\n @property\n def on_pre_signal(self):\n return self._on_pre_signal\n\n @property\n def on_post_signal(self):\n return self._on_post_signal\n\n @property\n def on_startup(self):\n return self._on_startup\n\n @property\n def on_shutdown(self):\n return self._on_shutdown\n\n @property\n def on_cleanup(self):\n return self._on_cleanup\n\n @property\n def router(self):\n return self._router\n\n @property\n def middlewares(self):\n return self._middlewares\n\n def make_handler(self, *, loop=None,\n secure_proxy_ssl_header=None, **kwargs):\n self._set_loop(loop)\n self.freeze()\n\n kwargs['debug'] = self.debug\n if self._handler_args:\n for k, v in self._handler_args.items():\n kwargs[k] = v\n\n if secure_proxy_ssl_header:\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n return Server(self._handle, request_factory=self._make_request,\n loop=self.loop, **kwargs)\n\n @asyncio.coroutine\n def startup(self):\n \"\"\"Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n \"\"\"\n yield from self.on_startup.send(self)\n\n @asyncio.coroutine\n def shutdown(self):\n \"\"\"Causes on_shutdown signal\n\n Should be called before cleanup()\n \"\"\"\n yield from self.on_shutdown.send(self)\n\n @asyncio.coroutine\n def cleanup(self):\n \"\"\"Causes on_cleanup signal\n\n Should be called after shutdown()\n \"\"\"\n yield from self.on_cleanup.send(self)\n\n def _make_request(self, message, payload, protocol, writer, task,\n _cls=web_request.Request):\n return _cls(\n message, payload, protocol, writer, protocol._time_service, task,\n secure_proxy_ssl_header=self._secure_proxy_ssl_header,\n client_max_size=self._client_max_size)\n\n @asyncio.coroutine\n def _handle(self, request):\n match_info = yield from self._router.resolve(request)\n assert isinstance(match_info, AbstractMatchInfo), match_info\n match_info.add_app(self)\n\n if __debug__:\n match_info.freeze()\n\n resp = None\n request._match_info = match_info\n expect = request.headers.get(hdrs.EXPECT)\n if expect:\n resp = yield from match_info.expect_handler(request)\n yield 
from request.writer.drain()\n\n if resp is None:\n handler = match_info.handler\n for app in match_info.apps[::-1]:\n for factory in app._middlewares:\n handler = yield from factory(app, handler)\n\n resp = yield from handler(request)\n\n assert isinstance(resp, web_response.StreamResponse), \\\n (\"Handler {!r} should return response instance, \"\n \"got {!r} [middlewares {!r}]\").format(\n match_info.handler, type(resp),\n [middleware for middleware in app.middlewares\n for app in match_info.apps])\n return resp\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n def __repr__(self):\n return \"<Application 0x{:x}>\".format(id(self))\n\n\ndef run_app(app, *, host=None, port=None, path=None, sock=None,\n shutdown_timeout=60.0, ssl_context=None,\n print=print, backlog=128, access_log_format=None,\n access_log=access_logger, loop=None):\n \"\"\"Run an app locally\"\"\"\n if loop is None:\n loop = asyncio.get_event_loop()\n\n make_handler_kwargs = dict()\n if access_log_format is not None:\n make_handler_kwargs['access_log_format'] = access_log_format\n handler = app.make_handler(loop=loop, access_log=access_log,\n **make_handler_kwargs)\n\n loop.run_until_complete(app.startup())\n\n scheme = 'https' if ssl_context else 'http'\n base_url = URL('{}://localhost'.format(scheme)).with_port(port)\n\n if path is None:\n paths = ()\n elif isinstance(path, (str, bytes, bytearray, memoryview))\\\n or not isinstance(path, Iterable):\n paths = (path,)\n else:\n paths = path\n\n if sock is None:\n socks = ()\n elif not isinstance(sock, Iterable):\n socks = (sock,)\n else:\n socks = sock\n\n if host is None:\n if (paths or socks) and not port:\n hosts = ()\n else:\n hosts = (\"0.0.0.0\",)\n elif isinstance(host, (str, bytes, bytearray, memoryview))\\\n or not isinstance(host, Iterable):\n hosts = (host,)\n else:\n hosts = host\n\n if hosts and port is None:\n port = 8443 if ssl_context else 8080\n\n server_creations = []\n uris = [str(base_url.with_host(host)) for host in hosts]\n if hosts:\n # Multiple hosts bound to same server is available in most loop\n # implementations, but only send multiple if we have multiple.\n host_binding = hosts[0] if len(hosts) == 1 else hosts\n server_creations.append(\n loop.create_server(\n handler, host_binding, port, ssl=ssl_context, backlog=backlog\n )\n )\n for path in paths:\n # Most loop implementations don't support multiple paths bound in same\n # server, so create a server for each.\n server_creations.append(\n loop.create_unix_server(\n handler, path, ssl=ssl_context, backlog=backlog\n )\n )\n uris.append('{}://unix:{}:'.format(scheme, path))\n\n # Clean up prior socket path if stale and not abstract.\n # CPython 3.5.3+'s event loop already does this. 
See\n # https://github.com/python/asyncio/issues/425\n if path[0] not in (0, '\\x00'): # pragma: no branch\n try:\n if stat.S_ISSOCK(os.stat(path).st_mode):\n os.remove(path)\n except FileNotFoundError:\n pass\n for sock in socks:\n server_creations.append(\n loop.create_server(\n handler, sock=sock, ssl=ssl_context, backlog=backlog\n )\n )\n\n if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:\n uris.append('{}://unix:{}:'.format(scheme, sock.getsockname()))\n else:\n host, port = sock.getsockname()\n uris.append(str(base_url.with_host(host).with_port(port)))\n\n servers = loop.run_until_complete(\n asyncio.gather(*server_creations, loop=loop)\n )\n\n print(\"======== Running on {} ========\\n\"\n \"(Press CTRL+C to quit)\".format(', '.join(uris)))\n\n try:\n loop.run_forever()\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n server_closures = []\n for srv in servers:\n srv.close()\n server_closures.append(srv.wait_closed())\n loop.run_until_complete(asyncio.gather(*server_closures, loop=loop))\n loop.run_until_complete(app.shutdown())\n loop.run_until_complete(handler.shutdown(shutdown_timeout))\n loop.run_until_complete(app.cleanup())\n loop.close()\n\n\ndef main(argv):\n arg_parser = ArgumentParser(\n description=\"aiohttp.web Application server\",\n prog=\"aiohttp.web\"\n )\n arg_parser.add_argument(\n \"entry_func\",\n help=(\"Callable returning the `aiohttp.web.Application` instance to \"\n \"run. Should be specified in the 'module:function' syntax.\"),\n metavar=\"entry-func\"\n )\n arg_parser.add_argument(\n \"-H\", \"--hostname\",\n help=\"TCP/IP hostname to serve on (default: %(default)r)\",\n default=\"localhost\"\n )\n arg_parser.add_argument(\n \"-P\", \"--port\",\n help=\"TCP/IP port to serve on (default: %(default)r)\",\n type=int,\n default=\"8080\"\n )\n arg_parser.add_argument(\n \"-U\", \"--path\",\n help=\"Unix file system path to serve on. Specifying a path will cause \"\n \"hostname and port arguments to be ignored.\",\n )\n args, extra_argv = arg_parser.parse_known_args(argv)\n\n # Import logic\n mod_str, _, func_str = args.entry_func.partition(\":\")\n if not func_str or not mod_str:\n arg_parser.error(\n \"'entry-func' not in 'module:function' syntax\"\n )\n if mod_str.startswith(\".\"):\n arg_parser.error(\"relative module names not supported\")\n try:\n module = import_module(mod_str)\n except ImportError as ex:\n arg_parser.error(\"unable to import %s: %s\" % (mod_str, ex))\n try:\n func = getattr(module, func_str)\n except AttributeError:\n arg_parser.error(\"module %r has no attribute %r\" % (mod_str, func_str))\n\n # Compatibility logic\n if args.path is not None and not hasattr(socket, 'AF_UNIX'):\n arg_parser.error(\"file system paths not supported by your operating\"\n \" environment\")\n\n app = func(extra_argv)\n run_app(app, host=args.hostname, port=args.port, path=args.path)\n arg_parser.exit(message=\"Stopped\\n\")\n\n\nif __name__ == \"__main__\": # pragma: no branch\n main(sys.argv[1:]) # pragma: no cover\n",
"path": "aiohttp/web.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 7283d2b4cd8..432137c526c 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -31,6 +31,8 @@ Changes
- Do not unquote `+` in match_info values #1816
+- Fix sub-application middlewares resolution order #1853
+
2.0.7 (2017-04-12)
------------------
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index c6bfe84e2d1..16a0f6ea48c 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -1,4 +1,4 @@
-
+Contributors
------------
A. Jesse Jiryu Davis
Adam Mills
@@ -42,7 +42,7 @@ Chris AtLee
Chris Laws
Chris Moore
Christopher Schmitt
-Contributors
+Damien Nadé
Daniel García
Daniel Nelson
Danny Song
@@ -174,4 +174,4 @@ Yuriy Shatrov
Yury Selivanov
Yusuke Tsutsumi
Марк Коренберг
-Семён Марьясин
\ No newline at end of file
+Семён Марьясин
diff --git a/aiohttp/web.py b/aiohttp/web.py
index 54b7e2fd8f0..1ce388b06b1 100644
--- a/aiohttp/web.py
+++ b/aiohttp/web.py
@@ -288,7 +288,7 @@ def _handle(self, request):
if resp is None:
handler = match_info.handler
- for app in match_info.apps:
+ for app in match_info.apps[::-1]:
for factory in app._middlewares:
handler = yield from factory(app, handler)
|
pymodbus-dev__pymodbus-1604 | pymodbus.server does not listen on modbus port
### Versions
- Python: 3.11.3
- OS: Fedora 37
- Pymodbus: 3.3.1
### Pymodbus Specific
- Server: tcp
### Description
- start pymodbus server:
```
pymodbus.server --verbose run -u 1
__________ .______. _________
\______ \___.__. _____ ____ __| _/\_ |__ __ __ ______ / _____/ ______________ __ ___________
| ___< | |/ \ / _ \ / __ | | __ \| | \/ ___/ \_____ \_/ __ \_ __ \ \/ // __ \_ __ \\
| | \___ | Y Y ( <_> ) /_/ | | \_\ \ | /\___ \ / \ ___/| | \/\ /\ ___/| | \/
|____| / ____|__|_| /\____/\____ | |___ /____//____ > /_______ /\___ >__| \_/ \___ >__|
\/ \/ \/ \/ \/ \/ \/ \/
SERVER >
```
- try to connect to port 5020 or check which process is listening on port 5020
- Current result (see the corrected startup sketch after this list):
  - server does not listen on port 5020
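The patch included at the end of this entry fixes this by starting the reactive server before handing control to the REPL, so the Modbus port is bound in both modes. A sketch of the corrected tail of the `run()` command (names as in `pymodbus/repl/server/main.py`; this is a fragment of that function, not a standalone script) is:
```python
# Start the Modbus server first so it binds port 5020 in both modes,
# then either enter the interactive REPL or just keep the loop running.
loop.run_until_complete(app.run_async(repl))
if repl:
    loop.run_until_complete(run_repl(app))
else:
    loop.run_forever()
```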
| [
{
"content": "\"\"\"Repl server main.\"\"\"\nimport asyncio\nimport json\nimport logging\nimport sys\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\n\nimport typer\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.repl.server.cli import run_repl\nfrom pymodbus.server.reactive.default_config import DEFAULT_CONFIG\nfrom pymodbus.server.reactive.main import (\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n ReactiveServer,\n)\n\n\nCANCELLED_ERROR = asyncio.exceptions.CancelledError\nCONTEXT_SETTING = {\"allow_extra_args\": True, \"ignore_unknown_options\": True}\n\n\n# TBD class ModbusServerConfig:\n\n\nclass ModbusServerTypes(str, Enum):\n \"\"\"Server types.\"\"\"\n\n # [\"tcp\", \"serial\", \"tls\", \"udp\"]\n tcp = \"tcp\" # pylint: disable=invalid-name\n serial = \"serial\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n udp = \"udp\" # pylint: disable=invalid-name\n\n\nclass ModbusFramerTypes(str, Enum):\n \"\"\"Framer types.\"\"\"\n\n # [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n socket = \"socket\" # pylint: disable=invalid-name\n rtu = \"rtu\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n ascii = \"ascii\" # pylint: disable=invalid-name\n binary = \"binary\" # pylint: disable=invalid-name\n\n\ndef _completer(incomplete: str, valid_values: List[str]) -> List[str]:\n \"\"\"Complete value.\"\"\"\n completion = []\n for name in valid_values:\n if name.startswith(incomplete):\n completion.append(name)\n return completion\n\n\ndef framers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _framers = [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n return _completer(incomplete, _framers)\n\n\ndef servers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _servers = [\"tcp\", \"serial\", \"tls\", \"udp\"]\n return _completer(incomplete, _servers)\n\n\ndef process_extra_args(extra_args: List[str], modbus_config: dict) -> dict:\n \"\"\"Process extra args passed to server.\"\"\"\n options_stripped = [x.strip().replace(\"--\", \"\") for x in extra_args[::2]]\n extra_args_dict = dict(list(zip(options_stripped, extra_args[1::2])))\n for option, value in extra_args_dict.items():\n if option in modbus_config:\n try:\n modbus_config[option] = type(modbus_config[option])(value)\n except ValueError as err:\n Log.error(\n \"Error parsing extra arg {} with value '{}'. 
{}\", option, value, err\n )\n sys.exit(1)\n return modbus_config\n\n\napp = typer.Typer(\n no_args_is_help=True,\n context_settings=CONTEXT_SETTING,\n help=\"Reactive Modbus server\",\n)\n\n\[email protected]()\ndef server(\n ctx: typer.Context,\n host: str = typer.Option(\"localhost\", \"--host\", help=\"Host address\"),\n web_port: int = typer.Option(8080, \"--web-port\", help=\"Web app port\"),\n broadcast_support: bool = typer.Option(\n False, \"-b\", help=\"Support broadcast messages\"\n ),\n repl: bool = typer.Option(True, help=\"Enable/Disable repl for server\"),\n verbose: bool = typer.Option(\n False, help=\"Run with debug logs enabled for pymodbus\"\n ),\n):\n \"\"\"Run server code.\"\"\"\n log_level = logging.DEBUG if verbose else logging.ERROR\n pymodbus_apply_logging_config(log_level)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\", context_settings=CONTEXT_SETTING)\ndef run(\n ctx: typer.Context,\n modbus_server: str = typer.Option(\n ModbusServerTypes.tcp.value,\n \"--modbus-server\",\n \"-s\",\n case_sensitive=False,\n autocompletion=servers,\n help=\"Modbus Server\",\n ),\n modbus_framer: str = typer.Option(\n ModbusFramerTypes.socket.value,\n \"--framer\",\n \"-f\",\n case_sensitive=False,\n autocompletion=framers,\n help=\"Modbus framer to use\",\n ),\n modbus_port: int = typer.Option(5020, \"--modbus-port\", \"-p\", help=\"Modbus port\"),\n modbus_slave_id: List[int] = typer.Option(\n [1], \"--slave-id\", \"-u\", help=\"Supported Modbus slave id's\"\n ),\n modbus_config_path: Path = typer.Option(\n None, help=\"Path to additional modbus server config\"\n ),\n randomize: int = typer.Option(\n 0,\n \"--random\",\n \"-r\",\n help=\"Randomize every `r` reads. 0=never, 1=always,2=every-second-read\"\n \", and so on. Applicable IR and DI.\",\n ),\n change_rate: int = typer.Option(\n 0,\n \"--change-rate\",\n \"-c\",\n help=\"Rate in % registers to change. 0=none, 100=all, 12=12% of registers\"\n \", and so on. Applicable IR and DI.\",\n ),\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n # TBD extra_args = ctx.args\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config_path:\n with open(modbus_config_path, encoding=\"utf-8\") as my_file:\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFAULT_CONFIG\n\n extra_args = ctx.args\n data_block_settings = modbus_config.pop(\"data_block_settings\", {})\n modbus_config = modbus_config.get(modbus_server, {})\n modbus_config = process_extra_args(extra_args, modbus_config)\n if modbus_server != \"serial\":\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n modbus_config[\"change_rate\"] = change_rate\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n slave=modbus_slave_id,\n loop=loop,\n single=False,\n data_block_settings=data_block_settings,\n **web_app_config,\n **modbus_config,\n )\n if repl:\n loop.run_until_complete(run_repl(app))\n else:\n loop.run_until_complete(app.run_async(repl))\n loop.run_forever()\n\n\nif __name__ == \"__main__\":\n app()\n",
"path": "pymodbus/repl/server/main.py"
}
] | [
{
"content": "\"\"\"Repl server main.\"\"\"\nimport asyncio\nimport json\nimport logging\nimport sys\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\n\nimport typer\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.repl.server.cli import run_repl\nfrom pymodbus.server.reactive.default_config import DEFAULT_CONFIG\nfrom pymodbus.server.reactive.main import (\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n ReactiveServer,\n)\n\n\nCANCELLED_ERROR = asyncio.exceptions.CancelledError\nCONTEXT_SETTING = {\"allow_extra_args\": True, \"ignore_unknown_options\": True}\n\n\n# TBD class ModbusServerConfig:\n\n\nclass ModbusServerTypes(str, Enum):\n \"\"\"Server types.\"\"\"\n\n # [\"tcp\", \"serial\", \"tls\", \"udp\"]\n tcp = \"tcp\" # pylint: disable=invalid-name\n serial = \"serial\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n udp = \"udp\" # pylint: disable=invalid-name\n\n\nclass ModbusFramerTypes(str, Enum):\n \"\"\"Framer types.\"\"\"\n\n # [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n socket = \"socket\" # pylint: disable=invalid-name\n rtu = \"rtu\" # pylint: disable=invalid-name\n tls = \"tls\" # pylint: disable=invalid-name\n ascii = \"ascii\" # pylint: disable=invalid-name\n binary = \"binary\" # pylint: disable=invalid-name\n\n\ndef _completer(incomplete: str, valid_values: List[str]) -> List[str]:\n \"\"\"Complete value.\"\"\"\n completion = []\n for name in valid_values:\n if name.startswith(incomplete):\n completion.append(name)\n return completion\n\n\ndef framers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _framers = [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"]\n return _completer(incomplete, _framers)\n\n\ndef servers(incomplete: str) -> List[str]:\n \"\"\"Return an autocompleted list of supported clouds.\"\"\"\n _servers = [\"tcp\", \"serial\", \"tls\", \"udp\"]\n return _completer(incomplete, _servers)\n\n\ndef process_extra_args(extra_args: List[str], modbus_config: dict) -> dict:\n \"\"\"Process extra args passed to server.\"\"\"\n options_stripped = [x.strip().replace(\"--\", \"\") for x in extra_args[::2]]\n extra_args_dict = dict(list(zip(options_stripped, extra_args[1::2])))\n for option, value in extra_args_dict.items():\n if option in modbus_config:\n try:\n modbus_config[option] = type(modbus_config[option])(value)\n except ValueError as err:\n Log.error(\n \"Error parsing extra arg {} with value '{}'. 
{}\", option, value, err\n )\n sys.exit(1)\n return modbus_config\n\n\napp = typer.Typer(\n no_args_is_help=True,\n context_settings=CONTEXT_SETTING,\n help=\"Reactive Modbus server\",\n)\n\n\[email protected]()\ndef server(\n ctx: typer.Context,\n host: str = typer.Option(\"localhost\", \"--host\", help=\"Host address\"),\n web_port: int = typer.Option(8080, \"--web-port\", help=\"Web app port\"),\n broadcast_support: bool = typer.Option(\n False, \"-b\", help=\"Support broadcast messages\"\n ),\n repl: bool = typer.Option(True, help=\"Enable/Disable repl for server\"),\n verbose: bool = typer.Option(\n False, help=\"Run with debug logs enabled for pymodbus\"\n ),\n):\n \"\"\"Run server code.\"\"\"\n log_level = logging.DEBUG if verbose else logging.ERROR\n pymodbus_apply_logging_config(log_level)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\", context_settings=CONTEXT_SETTING)\ndef run(\n ctx: typer.Context,\n modbus_server: str = typer.Option(\n ModbusServerTypes.tcp.value,\n \"--modbus-server\",\n \"-s\",\n case_sensitive=False,\n autocompletion=servers,\n help=\"Modbus Server\",\n ),\n modbus_framer: str = typer.Option(\n ModbusFramerTypes.socket.value,\n \"--framer\",\n \"-f\",\n case_sensitive=False,\n autocompletion=framers,\n help=\"Modbus framer to use\",\n ),\n modbus_port: int = typer.Option(5020, \"--modbus-port\", \"-p\", help=\"Modbus port\"),\n modbus_slave_id: List[int] = typer.Option(\n [1], \"--slave-id\", \"-u\", help=\"Supported Modbus slave id's\"\n ),\n modbus_config_path: Path = typer.Option(\n None, help=\"Path to additional modbus server config\"\n ),\n randomize: int = typer.Option(\n 0,\n \"--random\",\n \"-r\",\n help=\"Randomize every `r` reads. 0=never, 1=always,2=every-second-read\"\n \", and so on. Applicable IR and DI.\",\n ),\n change_rate: int = typer.Option(\n 0,\n \"--change-rate\",\n \"-c\",\n help=\"Rate in % registers to change. 0=none, 100=all, 12=12% of registers\"\n \", and so on. Applicable IR and DI.\",\n ),\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n # TBD extra_args = ctx.args\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config_path:\n with open(modbus_config_path, encoding=\"utf-8\") as my_file:\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFAULT_CONFIG\n\n extra_args = ctx.args\n data_block_settings = modbus_config.pop(\"data_block_settings\", {})\n modbus_config = modbus_config.get(modbus_server, {})\n modbus_config = process_extra_args(extra_args, modbus_config)\n if modbus_server != \"serial\":\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n modbus_config[\"change_rate\"] = change_rate\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n slave=modbus_slave_id,\n loop=loop,\n single=False,\n data_block_settings=data_block_settings,\n **web_app_config,\n **modbus_config,\n )\n loop.run_until_complete(app.run_async(repl))\n if repl:\n loop.run_until_complete(run_repl(app))\n else:\n loop.run_forever()\n\n\nif __name__ == \"__main__\":\n app()\n",
"path": "pymodbus/repl/server/main.py"
}
] | diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py
index 1c72e9b0a..bb01e64d7 100644
--- a/pymodbus/repl/server/main.py
+++ b/pymodbus/repl/server/main.py
@@ -198,10 +198,10 @@ def run(
**web_app_config,
**modbus_config,
)
+ loop.run_until_complete(app.run_async(repl))
if repl:
loop.run_until_complete(run_repl(app))
else:
- loop.run_until_complete(app.run_async(repl))
loop.run_forever()
|
quantumlib__Cirq-5072 | [cirqflow] `KeyValueExecutableSpec` should provide a `to_dict` method / override `__getitem__`
**Is your feature request related to a use case or problem? Please describe.**
`cg.KeyValueExecutableSpec` provides a nice `from_dict()` method to convert a dict into a `Tuple[Tuple[str, Any], ...]` which is hashable. This is useful when constructing the executable spec. However, using the executable spec during analysis of the results forces one to use the stored tuples, which is cumbersome.
**Describe the solution you'd like**
The class should provide a similar `to_dict` method which converts the stored `key_value_pairs` to a dictionary and returns it -- dictionaries are much easier to work with. Though the method would be a simple `return dict(self.key_value_pairs)`, there might be some value in having it explicitly on the class. We can also consider providing a custom `__getitem__` method.
**What is the urgency from your perspective for this issue? Is it blocking important work?**
P1 - I need this no later than the next release (end of quarter)
cc @mpharrigan
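A self-contained sketch of what this could look like is below. The `to_dict` method mirrors the change in the updated file that follows; the `__getitem__` override is only the optional extension discussed above and is not part of the actual change, and the stand-in dataclass plus the example values are used here purely so the snippet runs on its own.
```python
from dataclasses import dataclass
from typing import Any, Dict, Tuple

@dataclass(frozen=True)
class KeyValueExecutableSpecSketch:
    """Stand-in with the same fields as cg.KeyValueExecutableSpec."""
    executable_family: str
    key_value_pairs: Tuple[Tuple[str, Any], ...] = ()

    def to_dict(self) -> Dict[str, Any]:
        # Mirrors the helper added in the patched file below.
        return dict(self.key_value_pairs)

    def __getitem__(self, key: str) -> Any:
        # Optional convenience discussed above; not part of the actual fix.
        return dict(self.key_value_pairs)[key]

# Hypothetical example values, for illustration only.
spec = KeyValueExecutableSpecSketch('my.executable.family', (('n_qubits', 4),))
assert spec.to_dict() == {'n_qubits': 4}
assert spec['n_qubits'] == 4
```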
| [
{
"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data structures for programs executable on a quantum runtime.\"\"\"\n\nimport abc\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator\n\nimport cirq\nfrom cirq import _compat, study\n\n\nclass ExecutableSpec(metaclass=abc.ABCMeta):\n \"\"\"Specification metadata about an executable.\n\n Subclasses should add problem-specific fields.\n \"\"\"\n\n executable_family: str = NotImplemented\n \"\"\"A unique name to group executables.\"\"\"\n\n\n@dataclass(frozen=True)\nclass KeyValueExecutableSpec(ExecutableSpec):\n \"\"\"A generic executable spec whose metadata is a list of key-value pairs.\n\n The key-value pairs define an implicit data schema. Consider defining a problem-specific\n subclass of `ExecutableSpec` instead of using this class to realize the benefits of having\n an explicit schema.\n\n See Also:\n `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.\n\n Args:\n executable_family: A unique name to group executables.\n key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values\n can be any immutable object.\n \"\"\"\n\n executable_family: str\n key_value_pairs: Tuple[Tuple[str, Any], ...] = ()\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in d.items()),\n )\n\n @classmethod\n def _from_json_dict_(\n cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs\n ) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in key_value_pairs),\n )\n\n def __repr__(self) -> str:\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\n@dataclass(frozen=True)\nclass BitstringsMeasurement:\n \"\"\"Use in-circuit MeasurementGate to collect many repetitions of strings of bits.\n\n This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves\n identically to the `cirq.Sampler.run` function. 
The executable's circuit must contain\n explicit measurement gates.\n\n Args:\n n_repeitions: The number of repetitions to execute the circuit.\n \"\"\"\n\n n_repetitions: int\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n def __repr__(self):\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\nTParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]\n\n\n@dataclass(frozen=True)\nclass QuantumExecutable:\n \"\"\"An executable quantum program.\n\n This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum\n executable contains all the relevant context for execution including parameters as well as\n the desired number of repetitions. Second, this object is immutable. Finally, there are\n optional fields enabling a higher level of abstraction for certain aspects of the executable.\n\n Attributes:\n circuit: A `cirq.Circuit` describing the quantum operations to execute.\n measurement: A description of the measurement properties or process.\n params: An immutable `cirq.ParamResolver` (or similar type). It's representation is\n normalized to a tuple of key value pairs.\n spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not\n used by the quantum runtime, but will be forwarded to all downstream result objects.\n problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the\n circuit. This is useful when optimizing on-device layout. If none is provided we\n assume `circuit` already has a valid on-device layout.\n initial_state: A `cirq.ProductState` specifying the desired initial state before executing\n `circuit`. If not specified, default to the all-zeros state.\n \"\"\"\n\n circuit: cirq.FrozenCircuit\n measurement: BitstringsMeasurement\n params: Optional[Tuple[TParamPair, ...]] = None\n spec: Optional[ExecutableSpec] = None\n problem_topology: Optional[cirq.NamedTopology] = None\n initial_state: Optional[cirq.ProductState] = None\n\n # pylint: disable=missing-raises-doc\n def __init__(\n self,\n circuit: cirq.AbstractCircuit,\n measurement: BitstringsMeasurement,\n params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,\n spec: Optional[ExecutableSpec] = None,\n problem_topology: Optional[cirq.NamedTopology] = None,\n initial_state: Optional[cirq.ProductState] = None,\n ):\n \"\"\"Initialize the quantum executable.\n\n The actual fields in this class are immutable, but we allow more liberal input types\n which will be frozen in this __init__ method.\n\n Args:\n circuit: The circuit. This will be frozen before being set as an attribute.\n measurement: A description of the measurement properties or process.\n params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of\n key value pairs.\n spec: Specification metadata about this executable that is not used by the quantum\n runtime, but is persisted in result objects to associate executables with results.\n problem_topology: Description of the multiqubit gate topology present in the circuit.\n If not specified, the circuit must be compatible with the device topology.\n initial_state: How to initialize the quantum system before running `circuit`. If not\n specified, the device will be initialized into the all-zeros state.\n \"\"\"\n\n # We care a lot about mutability in this class. 
No object is truly immutable in Python,\n # but we can get pretty close by following the example of dataclass(frozen=True), which\n # deletes this class's __setattr__ magic method. To set values ever, we use\n # object.__setattr__ in this __init__ function.\n #\n # We write our own __init__ function to be able to accept a wider range of input formats\n # that can be easily converted to our native, immutable format.\n object.__setattr__(self, 'circuit', circuit.freeze())\n object.__setattr__(self, 'measurement', measurement)\n\n if isinstance(params, tuple) and all(\n isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = params\n elif isinstance(params, Sequence) and all(\n isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = tuple((k, v) for k, v in params)\n elif study.resolver._is_param_resolver_or_similar_type(params):\n param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))\n frozen_params = tuple(param_resolver.param_dict.items())\n else:\n raise ValueError(f\"`params` should be a ParamResolverOrSimilarType, not {params}.\")\n object.__setattr__(self, 'params', frozen_params)\n\n object.__setattr__(self, 'spec', spec)\n object.__setattr__(self, 'problem_topology', problem_topology)\n object.__setattr__(self, 'initial_state', initial_state)\n\n # Hash may be expensive to compute, especially for large circuits.\n # This should be safe since this class should be immutable. This line will\n # also check for hashibility of members at construction time.\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __str__(self):\n return f'QuantumExecutable(spec={self.spec})'\n\n def __repr__(self):\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n\n@dataclass(frozen=True)\nclass QuantumExecutableGroup:\n \"\"\"A collection of `QuantumExecutable`s.\n\n Attributes:\n executables: A tuple of `cg.QuantumExecutable`.\n \"\"\"\n\n executables: Tuple[QuantumExecutable, ...]\n\n def __init__(\n self,\n executables: Sequence[QuantumExecutable],\n ):\n \"\"\"Initialize and normalize the quantum executable group.\n\n Args:\n executables: A sequence of `cg.QuantumExecutable` which will be frozen into a\n tuple.\n \"\"\"\n\n if not isinstance(executables, tuple):\n executables = tuple(executables)\n object.__setattr__(self, 'executables', executables)\n\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __len__(self) -> int:\n return len(self.executables)\n\n def __iter__(self) -> Iterator[QuantumExecutable]:\n yield from self.executables\n\n def __str__(self) -> str:\n exe_str = ', '.join(str(exe) for exe in self.executables[:2])\n if len(self.executables) > 2:\n exe_str += ', ...'\n\n return f'QuantumExecutableGroup(executables=[{exe_str}])'\n\n def __repr__(self) -> str:\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n def __hash__(self) -> int:\n return self._hash # type: ignore\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n",
"path": "cirq-google/cirq_google/workflow/quantum_executable.py"
}
] | [
{
"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data structures for programs executable on a quantum runtime.\"\"\"\n\nimport abc\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Union, Tuple, Optional, Sequence, cast, Dict, Any, List, Iterator\n\nimport cirq\nfrom cirq import _compat, study\n\n\nclass ExecutableSpec(metaclass=abc.ABCMeta):\n \"\"\"Specification metadata about an executable.\n\n Subclasses should add problem-specific fields.\n \"\"\"\n\n executable_family: str = NotImplemented\n \"\"\"A unique name to group executables.\"\"\"\n\n\n@dataclass(frozen=True)\nclass KeyValueExecutableSpec(ExecutableSpec):\n \"\"\"A generic executable spec whose metadata is a list of key-value pairs.\n\n The key-value pairs define an implicit data schema. Consider defining a problem-specific\n subclass of `ExecutableSpec` instead of using this class to realize the benefits of having\n an explicit schema.\n\n See Also:\n `KeyValueExecutableSpec.from_dict` will use a dictionary to populate `key_value_pairs`.\n\n Args:\n executable_family: A unique name to group executables.\n key_value_pairs: A tuple of key-value pairs. The keys should be strings but the values\n can be any immutable object.\n \"\"\"\n\n executable_family: str\n key_value_pairs: Tuple[Tuple[str, Any], ...] = ()\n\n def to_dict(self) -> Dict[str, Any]:\n return dict(self.key_value_pairs)\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any], *, executable_family: str) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in d.items()),\n )\n\n @classmethod\n def _from_json_dict_(\n cls, executable_family: str, key_value_pairs: List[List[Union[str, Any]]], **kwargs\n ) -> 'KeyValueExecutableSpec':\n return cls(\n executable_family=executable_family,\n key_value_pairs=tuple((k, v) for k, v in key_value_pairs),\n )\n\n def __repr__(self) -> str:\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\n@dataclass(frozen=True)\nclass BitstringsMeasurement:\n \"\"\"Use in-circuit MeasurementGate to collect many repetitions of strings of bits.\n\n This is the lowest-level measurement type allowed in `QuantumExecutable` and behaves\n identically to the `cirq.Sampler.run` function. 
The executable's circuit must contain\n explicit measurement gates.\n\n Args:\n n_repeitions: The number of repetitions to execute the circuit.\n \"\"\"\n\n n_repetitions: int\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n def __repr__(self):\n return cirq._compat.dataclass_repr(self, namespace='cirq_google')\n\n\nTParamPair = Tuple[cirq.TParamKey, cirq.TParamVal]\n\n\n@dataclass(frozen=True)\nclass QuantumExecutable:\n \"\"\"An executable quantum program.\n\n This serves a similar purpose to `cirq.Circuit` with some key differences. First, a quantum\n executable contains all the relevant context for execution including parameters as well as\n the desired number of repetitions. Second, this object is immutable. Finally, there are\n optional fields enabling a higher level of abstraction for certain aspects of the executable.\n\n Attributes:\n circuit: A `cirq.Circuit` describing the quantum operations to execute.\n measurement: A description of the measurement properties or process.\n params: An immutable `cirq.ParamResolver` (or similar type). It's representation is\n normalized to a tuple of key value pairs.\n spec: Optional `cg.ExecutableSpec` containing metadata about this executable that is not\n used by the quantum runtime, but will be forwarded to all downstream result objects.\n problem_topology: Optional `cirq.NamedTopology` instance specifying the topology of the\n circuit. This is useful when optimizing on-device layout. If none is provided we\n assume `circuit` already has a valid on-device layout.\n initial_state: A `cirq.ProductState` specifying the desired initial state before executing\n `circuit`. If not specified, default to the all-zeros state.\n \"\"\"\n\n circuit: cirq.FrozenCircuit\n measurement: BitstringsMeasurement\n params: Optional[Tuple[TParamPair, ...]] = None\n spec: Optional[ExecutableSpec] = None\n problem_topology: Optional[cirq.NamedTopology] = None\n initial_state: Optional[cirq.ProductState] = None\n\n # pylint: disable=missing-raises-doc\n def __init__(\n self,\n circuit: cirq.AbstractCircuit,\n measurement: BitstringsMeasurement,\n params: Union[Sequence[TParamPair], cirq.ParamResolverOrSimilarType] = None,\n spec: Optional[ExecutableSpec] = None,\n problem_topology: Optional[cirq.NamedTopology] = None,\n initial_state: Optional[cirq.ProductState] = None,\n ):\n \"\"\"Initialize the quantum executable.\n\n The actual fields in this class are immutable, but we allow more liberal input types\n which will be frozen in this __init__ method.\n\n Args:\n circuit: The circuit. This will be frozen before being set as an attribute.\n measurement: A description of the measurement properties or process.\n params: A cirq.ParamResolverOrSimilarType which will be frozen into a tuple of\n key value pairs.\n spec: Specification metadata about this executable that is not used by the quantum\n runtime, but is persisted in result objects to associate executables with results.\n problem_topology: Description of the multiqubit gate topology present in the circuit.\n If not specified, the circuit must be compatible with the device topology.\n initial_state: How to initialize the quantum system before running `circuit`. If not\n specified, the device will be initialized into the all-zeros state.\n \"\"\"\n\n # We care a lot about mutability in this class. 
No object is truly immutable in Python,\n # but we can get pretty close by following the example of dataclass(frozen=True), which\n # deletes this class's __setattr__ magic method. To set values ever, we use\n # object.__setattr__ in this __init__ function.\n #\n # We write our own __init__ function to be able to accept a wider range of input formats\n # that can be easily converted to our native, immutable format.\n object.__setattr__(self, 'circuit', circuit.freeze())\n object.__setattr__(self, 'measurement', measurement)\n\n if isinstance(params, tuple) and all(\n isinstance(param_kv, tuple) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = params\n elif isinstance(params, Sequence) and all(\n isinstance(param_kv, Sequence) and len(param_kv) == 2 for param_kv in params\n ):\n frozen_params = tuple((k, v) for k, v in params)\n elif study.resolver._is_param_resolver_or_similar_type(params):\n param_resolver = cirq.ParamResolver(cast(cirq.ParamResolverOrSimilarType, params))\n frozen_params = tuple(param_resolver.param_dict.items())\n else:\n raise ValueError(f\"`params` should be a ParamResolverOrSimilarType, not {params}.\")\n object.__setattr__(self, 'params', frozen_params)\n\n object.__setattr__(self, 'spec', spec)\n object.__setattr__(self, 'problem_topology', problem_topology)\n object.__setattr__(self, 'initial_state', initial_state)\n\n # Hash may be expensive to compute, especially for large circuits.\n # This should be safe since this class should be immutable. This line will\n # also check for hashibility of members at construction time.\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __str__(self):\n return f'QuantumExecutable(spec={self.spec})'\n\n def __repr__(self):\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self):\n return cirq.dataclass_json_dict(self)\n\n\n@dataclass(frozen=True)\nclass QuantumExecutableGroup:\n \"\"\"A collection of `QuantumExecutable`s.\n\n Attributes:\n executables: A tuple of `cg.QuantumExecutable`.\n \"\"\"\n\n executables: Tuple[QuantumExecutable, ...]\n\n def __init__(\n self,\n executables: Sequence[QuantumExecutable],\n ):\n \"\"\"Initialize and normalize the quantum executable group.\n\n Args:\n executables: A sequence of `cg.QuantumExecutable` which will be frozen into a\n tuple.\n \"\"\"\n\n if not isinstance(executables, tuple):\n executables = tuple(executables)\n object.__setattr__(self, 'executables', executables)\n\n object.__setattr__(self, '_hash', hash(dataclasses.astuple(self)))\n\n def __len__(self) -> int:\n return len(self.executables)\n\n def __iter__(self) -> Iterator[QuantumExecutable]:\n yield from self.executables\n\n def __str__(self) -> str:\n exe_str = ', '.join(str(exe) for exe in self.executables[:2])\n if len(self.executables) > 2:\n exe_str += ', ...'\n\n return f'QuantumExecutableGroup(executables=[{exe_str}])'\n\n def __repr__(self) -> str:\n return _compat.dataclass_repr(self, namespace='cirq_google')\n\n def __hash__(self) -> int:\n return self._hash # type: ignore\n\n @classmethod\n def _json_namespace_(cls) -> str:\n return 'cirq.google'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return cirq.dataclass_json_dict(self)\n",
"path": "cirq-google/cirq_google/workflow/quantum_executable.py"
}
] | diff --git a/cirq-google/cirq_google/workflow/quantum_executable.py b/cirq-google/cirq_google/workflow/quantum_executable.py
index a84fbf13b47..6287f2ea291 100644
--- a/cirq-google/cirq_google/workflow/quantum_executable.py
+++ b/cirq-google/cirq_google/workflow/quantum_executable.py
@@ -53,6 +53,9 @@ class KeyValueExecutableSpec(ExecutableSpec):
executable_family: str
key_value_pairs: Tuple[Tuple[str, Any], ...] = ()
+ def to_dict(self) -> Dict[str, Any]:
+ return dict(self.key_value_pairs)
+
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
diff --git a/cirq-google/cirq_google/workflow/quantum_executable_test.py b/cirq-google/cirq_google/workflow/quantum_executable_test.py
index 96f17d66f72..ecfcbff8c99 100644
--- a/cirq-google/cirq_google/workflow/quantum_executable_test.py
+++ b/cirq-google/cirq_google/workflow/quantum_executable_test.py
@@ -63,6 +63,18 @@ def test_kv_executable_spec():
hash(KeyValueExecutableSpec(executable_family='', key_value_pairs=[('name', 'test')]))
+def test_dict_round_trip():
+ input_dict = dict(name='test', idx=5)
+
+ kv = KeyValueExecutableSpec.from_dict(
+ input_dict, executable_family='cirq_google.algo_benchmarks.example'
+ )
+
+ actual_dict = kv.to_dict()
+
+ assert input_dict == actual_dict
+
+
def test_kv_repr():
kv = _get_example_spec()
cirq.testing.assert_equivalent_repr(kv, global_vals={'cirq_google': cirq_google})
|
secdev__scapy-924 | scapy import fails: NameError: name 'get_working_if' is not defined
Hello,
On Ubuntu 16.04.3 LTS, when launching the `scapy` CLI or importing `scapy.route` without any interface in use, I get the following error:
```Python
Traceback (most recent call last):
File "/usr/local/bin/scapy", line 25, in <module>
interact()
File "tools/scapy/scapy/main.py", line 421, in interact
init_session(session_name, mydict)
File "tools/scapy/scapy/main.py", line 293, in init_session
scapy_builtins = {k: v for k, v in six.iteritems(importlib.import_module(".all", "scapy").__dict__) if _validate_local(k)}
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "tools/scapy/scapy/all.py", line 25, in <module>
from scapy.route import *
File "tools/scapy/scapy/route.py", line 195, in <module>
conf.iface = get_working_if()
NameError: name 'get_working_if' is not defined
```
A bisect points to the recent commit fd50b349263256e0aaa69780937ae02d4f4ee46c, and more specifically to [this code](https://github.com/secdev/scapy/commit/fd50b349263256e0aaa69780937ae02d4f4ee46c#diff-3fc486d3e1085d11c80c20bab07375f2R194).
I'm not sure I correctly understand this code snippet and how it behaves on the different operating systems, which is why I prefer to open an issue :smiley:
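For reference, the failure can be reproduced without the CLI at all. The sketch below is a minimal check (assuming a scapy checkout at the bisected commit, on a host whose default route resolves to the loopback interface) that hits the same fallback branch; the fix in the attached diff is simply to import `get_working_if` from `scapy.arch` alongside `WINDOWS`.
```Python
# Hedged reproduction sketch: importing scapy.route alone runs the module-level
# fallback `conf.iface = get_working_if()`, which raises because the helper was
# never imported in the bisected commit.
try:
    import scapy.route  # noqa: F401
except NameError as exc:
    print(f"reproduced: {exc}")  # NameError: name 'get_working_if' is not defined
```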
| [
{
"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nRouting and handling of network interfaces.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.utils import atol, ltoa, itom, pretty_routes\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.arch import WINDOWS\nimport scapy.consts\nimport scapy.modules.six as six\n\n##############################\n## Routing/Interfaces stuff ##\n##############################\n\nclass Route:\n def __init__(self):\n self.resync()\n self.cache = {}\n\n def invalidate_cache(self):\n self.cache = {}\n\n def resync(self):\n from scapy.arch import read_routes\n self.invalidate_cache()\n self.routes = read_routes()\n\n def __repr__(self):\n rtlst = []\n for net, msk, gw, iface, addr, metric in self.routes:\n rtlst.append((ltoa(net),\n ltoa(msk),\n gw,\n (iface.name if not isinstance(iface, six.string_types) else iface),\n addr,\n str(metric)))\n\n return pretty_routes(rtlst,\n [(\"Network\", \"Netmask\", \"Gateway\", \"Iface\", \"Output IP\", \"Metric\")])\n\n def make_route(self, host=None, net=None, gw=None, dev=None, metric=1):\n from scapy.arch import get_if_addr\n if host is not None:\n thenet,msk = host,32\n elif net is not None:\n thenet,msk = net.split(\"/\")\n msk = int(msk)\n else:\n raise Scapy_Exception(\"make_route: Incorrect parameters. You should specify a host or a net\")\n if gw is None:\n gw=\"0.0.0.0\"\n if dev is None:\n if gw:\n nhop = gw\n else:\n nhop = thenet\n dev,ifaddr,x = self.route(nhop)\n else:\n ifaddr = get_if_addr(dev)\n return (atol(thenet), itom(msk), gw, dev, ifaddr, metric)\n\n def add(self, *args, **kargs):\n \"\"\"Ex:\n add(net=\"192.168.1.0/24\",gw=\"1.2.3.4\")\n \"\"\"\n self.invalidate_cache()\n self.routes.append(self.make_route(*args,**kargs))\n\n \n def delt(self, *args, **kargs):\n \"\"\"delt(host|net, gw|dev)\"\"\"\n self.invalidate_cache()\n route = self.make_route(*args,**kargs)\n try:\n i=self.routes.index(route)\n del(self.routes[i])\n except ValueError:\n warning(\"no matching route found\")\n \n def ifchange(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n \n \n for i, route in enumerate(self.routes):\n net, msk, gw, iface, addr, metric = route\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n if gw == '0.0.0.0':\n self.routes[i] = (the_net,the_msk,gw,iface,the_addr,metric)\n else:\n self.routes[i] = (net,msk,gw,iface,the_addr,metric)\n conf.netcache.flush()\n \n \n\n def ifdel(self, iff):\n self.invalidate_cache()\n new_routes=[]\n for rt in self.routes:\n if WINDOWS:\n if iff.guid == rt[3].guid:\n continue\n elif iff == rt[3]:\n continue\n new_routes.append(rt)\n self.routes=new_routes\n \n def ifadd(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n self.routes.append((the_net,the_msk,'0.0.0.0',iff,the_addr,1))\n\n\n def route(self,dest,verbose=None):\n if isinstance(dest, list) and dest:\n dest = dest[0]\n if dest in self.cache:\n return self.cache[dest]\n if verbose is None:\n verbose=conf.verb\n # Transform \"192.168.*.1-5\" to one IP of the set\n dst = 
dest.split(\"/\")[0]\n dst = dst.replace(\"*\",\"0\") \n while True:\n l = dst.find(\"-\")\n if l < 0:\n break\n m = (dst[l:]+\".\").find(\".\")\n dst = dst[:l]+dst[l+m:]\n\n \n dst = atol(dst)\n pathes=[]\n for d,m,gw,i,a,me in self.routes:\n if not a: # some interfaces may not currently be connected\n continue\n aa = atol(a)\n if aa == dst:\n pathes.append((0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE,a,\"0.0.0.0\")))\n if (dst & m) == (d & m):\n pathes.append((m, me, (i,a,gw)))\n if not pathes:\n if verbose:\n warning(\"No route found (no default route?)\")\n return scapy.consts.LOOPBACK_INTERFACE,\"0.0.0.0\",\"0.0.0.0\"\n # Choose the more specific route\n # Sort by greatest netmask\n pathes.sort(key=lambda x: x[0], reverse=True)\n # Get all pathes having the (same) greatest mask\n pathes = [i for i in pathes if i[0] == pathes[0][0]]\n # Tie-breaker: Metrics\n pathes.sort(key=lambda x: x[1])\n # Return interface\n ret = pathes[0][2]\n self.cache[dest] = ret\n return ret\n \n def get_if_bcast(self, iff):\n for net, msk, gw, iface, addr, metric in self.routes:\n if net == 0:\n continue\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n bcast = atol(addr)|(~msk&0xffffffff); # FIXME: check error in atol()\n return ltoa(bcast)\n warning(\"No broadcast address found for iface %s\\n\", iff);\n\nconf.route=Route()\n\n#XXX use \"with\"\n_betteriface = conf.route.route(\"0.0.0.0\", verbose=0)[0]\nif ((_betteriface if (isinstance(_betteriface, six.string_types) or _betteriface is None) else _betteriface.name) != scapy.consts.LOOPBACK_NAME):\n conf.iface = _betteriface\nelse:\n conf.iface = get_working_if()\ndel(_betteriface)\n",
"path": "scapy/route.py"
}
] | [
{
"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nRouting and handling of network interfaces.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.utils import atol, ltoa, itom, pretty_routes\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.arch import WINDOWS, get_working_if\nimport scapy.consts\nimport scapy.modules.six as six\n\n##############################\n## Routing/Interfaces stuff ##\n##############################\n\nclass Route:\n def __init__(self):\n self.resync()\n self.cache = {}\n\n def invalidate_cache(self):\n self.cache = {}\n\n def resync(self):\n from scapy.arch import read_routes\n self.invalidate_cache()\n self.routes = read_routes()\n\n def __repr__(self):\n rtlst = []\n for net, msk, gw, iface, addr, metric in self.routes:\n rtlst.append((ltoa(net),\n ltoa(msk),\n gw,\n (iface.name if not isinstance(iface, six.string_types) else iface),\n addr,\n str(metric)))\n\n return pretty_routes(rtlst,\n [(\"Network\", \"Netmask\", \"Gateway\", \"Iface\", \"Output IP\", \"Metric\")])\n\n def make_route(self, host=None, net=None, gw=None, dev=None, metric=1):\n from scapy.arch import get_if_addr\n if host is not None:\n thenet,msk = host,32\n elif net is not None:\n thenet,msk = net.split(\"/\")\n msk = int(msk)\n else:\n raise Scapy_Exception(\"make_route: Incorrect parameters. You should specify a host or a net\")\n if gw is None:\n gw=\"0.0.0.0\"\n if dev is None:\n if gw:\n nhop = gw\n else:\n nhop = thenet\n dev,ifaddr,x = self.route(nhop)\n else:\n ifaddr = get_if_addr(dev)\n return (atol(thenet), itom(msk), gw, dev, ifaddr, metric)\n\n def add(self, *args, **kargs):\n \"\"\"Ex:\n add(net=\"192.168.1.0/24\",gw=\"1.2.3.4\")\n \"\"\"\n self.invalidate_cache()\n self.routes.append(self.make_route(*args,**kargs))\n\n \n def delt(self, *args, **kargs):\n \"\"\"delt(host|net, gw|dev)\"\"\"\n self.invalidate_cache()\n route = self.make_route(*args,**kargs)\n try:\n i=self.routes.index(route)\n del(self.routes[i])\n except ValueError:\n warning(\"no matching route found\")\n \n def ifchange(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n \n \n for i, route in enumerate(self.routes):\n net, msk, gw, iface, addr, metric = route\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n if gw == '0.0.0.0':\n self.routes[i] = (the_net,the_msk,gw,iface,the_addr,metric)\n else:\n self.routes[i] = (net,msk,gw,iface,the_addr,metric)\n conf.netcache.flush()\n \n \n\n def ifdel(self, iff):\n self.invalidate_cache()\n new_routes=[]\n for rt in self.routes:\n if WINDOWS:\n if iff.guid == rt[3].guid:\n continue\n elif iff == rt[3]:\n continue\n new_routes.append(rt)\n self.routes=new_routes\n \n def ifadd(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n self.routes.append((the_net,the_msk,'0.0.0.0',iff,the_addr,1))\n\n\n def route(self,dest,verbose=None):\n if isinstance(dest, list) and dest:\n dest = dest[0]\n if dest in self.cache:\n return self.cache[dest]\n if verbose is None:\n verbose=conf.verb\n # Transform \"192.168.*.1-5\" to one IP of the 
set\n dst = dest.split(\"/\")[0]\n dst = dst.replace(\"*\",\"0\") \n while True:\n l = dst.find(\"-\")\n if l < 0:\n break\n m = (dst[l:]+\".\").find(\".\")\n dst = dst[:l]+dst[l+m:]\n\n \n dst = atol(dst)\n pathes=[]\n for d,m,gw,i,a,me in self.routes:\n if not a: # some interfaces may not currently be connected\n continue\n aa = atol(a)\n if aa == dst:\n pathes.append((0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE,a,\"0.0.0.0\")))\n if (dst & m) == (d & m):\n pathes.append((m, me, (i,a,gw)))\n if not pathes:\n if verbose:\n warning(\"No route found (no default route?)\")\n return scapy.consts.LOOPBACK_INTERFACE,\"0.0.0.0\",\"0.0.0.0\"\n # Choose the more specific route\n # Sort by greatest netmask\n pathes.sort(key=lambda x: x[0], reverse=True)\n # Get all pathes having the (same) greatest mask\n pathes = [i for i in pathes if i[0] == pathes[0][0]]\n # Tie-breaker: Metrics\n pathes.sort(key=lambda x: x[1])\n # Return interface\n ret = pathes[0][2]\n self.cache[dest] = ret\n return ret\n \n def get_if_bcast(self, iff):\n for net, msk, gw, iface, addr, metric in self.routes:\n if net == 0:\n continue\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n bcast = atol(addr)|(~msk&0xffffffff); # FIXME: check error in atol()\n return ltoa(bcast)\n warning(\"No broadcast address found for iface %s\\n\", iff);\n\nconf.route=Route()\n\n#XXX use \"with\"\n_betteriface = conf.route.route(\"0.0.0.0\", verbose=0)[0]\nif ((_betteriface if (isinstance(_betteriface, six.string_types) or _betteriface is None) else _betteriface.name) != scapy.consts.LOOPBACK_NAME):\n conf.iface = _betteriface\nelse:\n conf.iface = get_working_if()\ndel(_betteriface)\n",
"path": "scapy/route.py"
}
] | diff --git a/scapy/route.py b/scapy/route.py
index 580fe01216a..986b00d4d18 100644
--- a/scapy/route.py
+++ b/scapy/route.py
@@ -11,7 +11,7 @@
from scapy.utils import atol, ltoa, itom, pretty_routes
from scapy.config import conf
from scapy.error import Scapy_Exception, warning
-from scapy.arch import WINDOWS
+from scapy.arch import WINDOWS, get_working_if
import scapy.consts
import scapy.modules.six as six
|
dbt-labs__dbt-core-8922 | [CT-3210] [Bug] Error using `dbt list --select` when there is a cross-project model that is `version=0` in the parent project
### Is this a new bug in dbt-core?
- [X] I believe this is a new bug in dbt-core
- [X] I have searched the existing issues, and I could not find an existing issue for this bug
### Current Behavior
When you attempt to reference version 0 of a model, you get a stack trace error.
### Expected Behavior
We should allow the model version to be set to 0.
### Steps To Reproduce
1. On the parent/hub project, add a versioned model with `v: 0`
2. On the child/spoke project, attempt to reference that versioned model in a model:
`select * from {{ ref('example_hub', 'my_second_dbt_model', v=0) }}`
3. Run `dbt list --select anything`
Outstanding question: does this affect only cross-project refs, or all refs to a model with `v: 0`?
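The fix attached to this record suggests the root cause is an ordinary truthiness check that treats `version: 0` as "no version at all". The sketch below illustrates the pitfall with a stand-alone helper — the `fqn`/`fqn_fixed` names are hypothetical and only mirror `ModelNodeArgs.fqn`; they are not the actual dbt code path.
```python
# Sketch of the suspected falsy-zero pitfall: `if version:` skips version 0,
# so the "v0" suffix never makes it into the fqn and later lookups of the
# versioned node fail.
def fqn(package, name, version):
    parts = [package, name]
    if version:  # buggy: 0 is falsy, so the branch is skipped
        parts.append(f"v{version}")
    return parts


def fqn_fixed(package, name, version):
    parts = [package, name]
    if version is not None:  # explicit None check keeps version 0
        parts.append(f"v{version}")
    return parts


assert fqn("example_hub", "my_second_dbt_model", 0) == ["example_hub", "my_second_dbt_model"]
assert fqn_fixed("example_hub", "my_second_dbt_model", 0) == ["example_hub", "my_second_dbt_model", "v0"]
```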
### Relevant log output
_No response_
### Environment
```markdown
- OS:
- Python:
- dbt:
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
| [
{
"content": "from dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Optional, List\n\nfrom dbt.contracts.graph.unparsed import NodeVersion\nfrom dbt.node_types import NodeType, AccessType\n\n\n@dataclass\nclass ModelNodeArgs:\n name: str\n package_name: str\n identifier: str\n schema: str\n database: Optional[str] = None\n relation_name: Optional[str] = None\n version: Optional[NodeVersion] = None\n latest_version: Optional[NodeVersion] = None\n deprecation_date: Optional[datetime] = None\n access: Optional[str] = AccessType.Protected.value\n generated_at: datetime = field(default_factory=datetime.utcnow)\n depends_on_nodes: List[str] = field(default_factory=list)\n enabled: bool = True\n\n @property\n def unique_id(self) -> str:\n unique_id = f\"{NodeType.Model}.{self.package_name}.{self.name}\"\n if self.version:\n unique_id = f\"{unique_id}.v{self.version}\"\n\n return unique_id\n\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n if self.version:\n fqn.append(f\"v{self.version}\")\n\n return fqn\n",
"path": "core/dbt/contracts/graph/node_args.py"
}
] | [
{
"content": "from dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Optional, List\n\nfrom dbt.contracts.graph.unparsed import NodeVersion\nfrom dbt.node_types import NodeType, AccessType\n\n\n@dataclass\nclass ModelNodeArgs:\n name: str\n package_name: str\n identifier: str\n schema: str\n database: Optional[str] = None\n relation_name: Optional[str] = None\n version: Optional[NodeVersion] = None\n latest_version: Optional[NodeVersion] = None\n deprecation_date: Optional[datetime] = None\n access: Optional[str] = AccessType.Protected.value\n generated_at: datetime = field(default_factory=datetime.utcnow)\n depends_on_nodes: List[str] = field(default_factory=list)\n enabled: bool = True\n\n @property\n def unique_id(self) -> str:\n unique_id = f\"{NodeType.Model}.{self.package_name}.{self.name}\"\n if self.version:\n unique_id = f\"{unique_id}.v{self.version}\"\n\n return unique_id\n\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n # Test for None explicitly because version can be 0\n if self.version is not None:\n fqn.append(f\"v{self.version}\")\n\n return fqn\n",
"path": "core/dbt/contracts/graph/node_args.py"
}
] | diff --git a/.changes/unreleased/Fixes-20231026-002536.yaml b/.changes/unreleased/Fixes-20231026-002536.yaml
new file mode 100644
index 00000000000..f14c9ec0e0b
--- /dev/null
+++ b/.changes/unreleased/Fixes-20231026-002536.yaml
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Add version to fqn when version==0
+time: 2023-10-26T00:25:36.259356-05:00
+custom:
+ Author: aranke
+ Issue: "8836"
diff --git a/core/dbt/contracts/graph/node_args.py b/core/dbt/contracts/graph/node_args.py
index d1f4770b184..18b286b2ecc 100644
--- a/core/dbt/contracts/graph/node_args.py
+++ b/core/dbt/contracts/graph/node_args.py
@@ -33,7 +33,8 @@ def unique_id(self) -> str:
@property
def fqn(self) -> List[str]:
fqn = [self.package_name, self.name]
- if self.version:
+ # Test for None explicitly because version can be 0
+ if self.version is not None:
fqn.append(f"v{self.version}")
return fqn
diff --git a/tests/unit/test_contracts_graph_node_args.py b/tests/unit/test_contracts_graph_node_args.py
index f3f2d323d9a..958dfa11d72 100644
--- a/tests/unit/test_contracts_graph_node_args.py
+++ b/tests/unit/test_contracts_graph_node_args.py
@@ -14,7 +14,7 @@ def test_model_node_args_unique_id_with_version(self) -> None:
package_name="package",
identifier="identifier",
schema="schema",
- version="1",
+ version=1,
)
assert model_node_args.unique_id == "model.package.name.v1"
@@ -33,6 +33,16 @@ def test_model_node_args_fqn_with_version(self) -> None:
package_name="package",
identifier="identifier",
schema="schema",
- version="1",
+ version=1,
)
assert model_node_args.fqn == ["package", "name", "v1"]
+
+ def test_model_node_args_fqn_with_version_zero(self) -> None:
+ model_node_args = ModelNodeArgs(
+ name="name",
+ package_name="package",
+ identifier="identifier",
+ schema="schema",
+ version=0,
+ )
+ assert model_node_args.fqn == ["package", "name", "v0"]
|
holoviz__panel-1349 | GridBox ncols not honored when rendering from a callable
#### ALL software version info
```
In [3]: param.__version__
Out[3]: '1.10.0a2'
In [4]: pn.__version__
Out[4]: '0.10.0a2'
```
#### Description of expected behavior and the observed behavior
When a `GridBox` is first rendered dynamically (from a callable), it initially renders with only one column; only after an update is the `ncols` parameter respected.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import random
import param
import panel as pn
pn.extension()
class Test(param.Parameterized):
ncols = param.Integer(default=6)
@param.depends('ncols')
def grid(self):
rcolor = lambda: "#%06x" % random.randint(0, 0xFFFFFF)
return pn.GridBox(*[pn.pane.HTML(background=rcolor(), width=50, height=50) for i in range(24)], ncols=self.ncols)
def panel(self):
return pn.Column(
self.param.ncols,
self.grid,
)
t = Test()
t.panel()
```
#### Screenshots or screencasts of the bug in action

| [
{
"content": "\"\"\"\nDefines Layout classes which may be used to arrange panes and widgets\nin flexible ways to build complex dashboards.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nfrom collections import defaultdict, namedtuple\n\nimport param\n\nfrom bokeh.models import Column as BkColumn, Row as BkRow\n\nfrom ..io.model import hold\nfrom ..io.state import state\nfrom ..reactive import Reactive\nfrom ..util import param_name, param_reprs\n\n_row = namedtuple(\"row\", [\"children\"])\n_col = namedtuple(\"col\", [\"children\"])\n\n\nclass Panel(Reactive):\n \"\"\"\n Abstract baseclass for a layout of Viewables.\n \"\"\"\n\n _bokeh_model = None\n\n __abstract = True\n\n _rename = {'objects': 'children'}\n\n _linked_props = []\n\n def __repr__(self, depth=0, max_depth=10):\n if depth > max_depth:\n return '...'\n spacer = '\\n' + (' ' * (depth+1))\n cls = type(self).__name__\n params = param_reprs(self, ['objects'])\n objs = ['[%d] %s' % (i, obj.__repr__(depth+1)) for i, obj in enumerate(self)]\n if not params and not objs:\n return super(Panel, self).__repr__(depth+1)\n elif not params:\n template = '{cls}{spacer}{objs}'\n elif not objs:\n template = '{cls}({params})'\n else:\n template = '{cls}({params}){spacer}{objs}'\n return template.format(\n cls=cls, params=', '.join(params),\n objs=('%s' % spacer).join(objs), spacer=spacer)\n\n #----------------------------------------------------------------\n # Callback API\n #----------------------------------------------------------------\n\n def _update_model(self, events, msg, root, model, doc, comm=None):\n msg = dict(msg)\n if self._rename['objects'] in msg:\n old = events['objects'].old\n msg[self._rename['objects']] = self._get_objects(model, old, doc, root, comm)\n\n with hold(doc):\n super(Panel, self)._update_model(events, msg, root, model, doc, comm)\n from ..io import state\n ref = root.ref['id']\n if ref in state._views:\n state._views[ref][0]._preprocess(root)\n\n #----------------------------------------------------------------\n # Model API\n #----------------------------------------------------------------\n\n def _init_properties(self):\n properties = {k: v for k, v in self.param.get_param_values()\n if v is not None}\n del properties['objects']\n return self._process_param_change(properties)\n\n def _get_objects(self, model, old_objects, doc, root, comm=None):\n \"\"\"\n Returns new child models for the layout while reusing unchanged\n models and cleaning up any dropped objects.\n \"\"\"\n from ..pane.base import panel, RerenderError\n new_models = []\n for i, pane in enumerate(self.objects):\n pane = panel(pane)\n self.objects[i] = pane\n\n for obj in old_objects:\n if obj not in self.objects:\n obj._cleanup(root)\n\n current_objects = list(self.objects)\n for i, pane in enumerate(self.objects):\n if pane in old_objects:\n child, _ = pane._models[root.ref['id']]\n else:\n try:\n child = pane._get_model(doc, root, model, comm)\n except RerenderError:\n return self._get_objects(model, current_objects[:i], doc, root, comm)\n new_models.append(child)\n return new_models\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self._bokeh_model()\n if root is None:\n root = model\n objects = self._get_objects(model, [], doc, root, comm)\n props = dict(self._init_properties(), objects=objects)\n model.update(**self._process_param_change(props))\n self._models[root.ref['id']] = (model, parent)\n self._link_props(model, self._linked_props, doc, root, comm)\n return model\n\n 
#----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def select(self, selector=None):\n \"\"\"\n Iterates over the Viewable and any potential children in the\n applying the Selector.\n\n Arguments\n ---------\n selector: type or callable or None\n The selector allows selecting a subset of Viewables by\n declaring a type or callable function to filter by.\n\n Returns\n -------\n viewables: list(Viewable)\n \"\"\"\n objects = super(Panel, self).select(selector)\n for obj in self:\n objects += obj.select(selector)\n return objects\n\n\n\nclass ListLike(param.Parameterized):\n\n objects = param.List(default=[], doc=\"\"\"\n The list of child objects that make up the layout.\"\"\")\n \n def __getitem__(self, index):\n return self.objects[index]\n\n def __len__(self):\n return len(self.objects)\n\n def __iter__(self):\n for obj in self.objects:\n yield obj\n\n def __iadd__(self, other):\n self.extend(other)\n return self\n\n def __add__(self, other):\n if isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (stype, otype, stype))\n return self.clone(*(self.objects+other))\n\n def __radd__(self, other):\n if isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (otype, stype, stype))\n return self.clone(*(other+self.objects))\n\n def __contains__(self, obj):\n return obj in self.objects\n\n def __setitem__(self, index, panes):\n from ..pane import panel\n new_objects = list(self)\n if not isinstance(index, slice):\n start, end = index, index+1\n if start > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n panes = [panes]\n else:\n start = index.start or 0\n end = len(self) if index.stop is None else index.stop\n if index.start is None and index.stop is None:\n if not isinstance(panes, list):\n raise IndexError('Expected a list of objects to '\n 'replace the objects in the %s, '\n 'got a %s type.' %\n (type(self).__name__, type(panes).__name__))\n expected = len(panes)\n new_objects = [None]*expected\n end = expected\n elif end > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n else:\n expected = end-start\n if not isinstance(panes, list) or len(panes) != expected:\n raise IndexError('Expected a list of %d objects to set '\n 'on the %s to match the supplied slice.' 
%\n (expected, type(self).__name__))\n for i, pane in zip(range(start, end), panes):\n new_objects[i] = panel(pane)\n\n self.objects = new_objects\n\n def clone(self, *objects, **params):\n \"\"\"\n Makes a copy of the layout sharing the same parameters.\n\n Arguments\n ---------\n objects: Objects to add to the cloned layout.\n params: Keyword arguments override the parameters on the clone.\n\n Returns\n -------\n Cloned layout object\n \"\"\"\n if not objects:\n if 'objects' in params:\n objects = params.pop('objects')\n else:\n objects = self.objects\n elif 'objects' in params:\n raise ValueError(\"A %s's objects should be supplied either \"\n \"as arguments or as a keyword, not both.\"\n % type(self).__name__)\n p = dict(self.param.get_param_values(), **params)\n del p['objects']\n return type(self)(*objects, **params)\n\n def append(self, obj):\n \"\"\"\n Appends an object to the layout.\n\n Arguments\n ---------\n obj (object): Panel component to add to the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects\n\n def clear(self):\n \"\"\"\n Clears the objects on this layout.\n \"\"\"\n self.objects = []\n\n def extend(self, objects):\n \"\"\"\n Extends the objects on this layout with a list.\n\n Arguments\n ---------\n objects (list): List of panel components to add to the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.extend(list(map(panel, objects)))\n self.objects = new_objects\n\n def insert(self, index, obj):\n \"\"\"\n Inserts an object in the layout at the specified index.\n\n Arguments\n ---------\n index (int): Index at which to insert the object.\n object (object): Panel components to insert in the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.insert(index, panel(obj))\n self.objects = new_objects\n\n def pop(self, index):\n \"\"\"\n Pops an item from the layout by index.\n\n Arguments\n ---------\n index (int): The index of the item to pop from the layout.\n \"\"\"\n new_objects = list(self)\n if index in new_objects:\n index = new_objects.index(index)\n obj = new_objects.pop(index)\n self.objects = new_objects\n return obj\n\n def remove(self, obj):\n \"\"\"\n Removes an object from the layout.\n\n Arguments\n ---------\n obj (object): The object to remove from the layout.\n \"\"\"\n new_objects = list(self)\n new_objects.remove(obj)\n self.objects = new_objects\n\n def reverse(self):\n \"\"\"\n Reverses the objects in the layout.\n \"\"\"\n new_objects = list(self)\n new_objects.reverse()\n self.objects = new_objects\n\n \n\nclass ListPanel(ListLike, Panel):\n \"\"\"\n An abstract baseclass for Panel objects with list-like children.\n \"\"\"\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n scroll = param.Boolean(default=False, doc=\"\"\"\n Whether to add scrollbars if the content overflows the size\n of the container.\"\"\")\n\n _source_transforms = {'scroll': None}\n\n __abstract = True\n\n def __init__(self, *objects, **params):\n from ..pane import panel\n if objects:\n if 'objects' in params:\n raise ValueError(\"A %s's objects should be supplied either \"\n \"as positional arguments or as a keyword, \"\n \"not both.\" % type(self).__name__)\n params['objects'] = [panel(pane) for pane in objects]\n super(Panel, self).__init__(**params)\n\n def _process_param_change(self, params):\n scroll = params.pop('scroll', None)\n css_classes = self.css_classes or []\n if scroll:\n params['css_classes'] = css_classes + ['scrollable']\n elif scroll == False:\n params['css_classes'] = css_classes\n return super(ListPanel, self)._process_param_change(params)\n\n def _cleanup(self, root):\n if root.ref['id'] in state._fake_roots:\n state._fake_roots.remove(root.ref['id'])\n super(ListPanel, self)._cleanup(root)\n for p in self.objects:\n p._cleanup(root)\n\n\nclass NamedListPanel(ListPanel):\n\n active = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n Index of the currently displayed objects.\"\"\")\n\n objects = param.List(default=[], doc=\"\"\"\n The list of child objects that make up the tabs.\"\"\")\n\n def __init__(self, *items, **params):\n if 'objects' in params:\n if items:\n raise ValueError('%s objects should be supplied either '\n 'as positional arguments or as a keyword, '\n 'not both.' % type(self).__name__)\n items = params['objects']\n objects, self._names = self._to_objects_and_names(items)\n super(NamedListPanel, self).__init__(*objects, **params)\n self._panels = defaultdict(dict)\n self.param.watch(self._update_names, 'objects')\n # ALERT: Ensure that name update happens first, should be\n # replaced by watch precedence support in param\n self._param_watchers['objects']['value'].reverse()\n\n def _to_object_and_name(self, item):\n from ..pane import panel\n if isinstance(item, tuple):\n name, item = item\n else:\n name = getattr(item, 'name', None)\n pane = panel(item, name=name)\n name = param_name(pane.name) if name is None else name\n return pane, name\n\n def _to_objects_and_names(self, items):\n objects, names = [], []\n for item in items:\n pane, name = self._to_object_and_name(item)\n objects.append(pane)\n names.append(name)\n return objects, names\n\n def _update_names(self, event):\n if len(event.new) == len(self._names):\n return\n names = []\n for obj in event.new:\n if obj in event.old:\n index = event.old.index(obj)\n name = self._names[index]\n else:\n name = obj.name\n names.append(name)\n self._names = names\n\n def _update_active(self, *events):\n pass\n\n #----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def __add__(self, other):\n if isinstance(other, NamedListPanel):\n other = list(zip(other._names, other.objects))\n elif isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (stype, otype, stype))\n objects = list(zip(self._names, self.objects))\n return self.clone(*(objects+other))\n\n def 
__radd__(self, other):\n if isinstance(other, NamedListPanel):\n other = list(zip(other._names, other.objects))\n elif isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (otype, stype, stype))\n objects = list(zip(self._names, self.objects))\n return self.clone(*(other+objects))\n\n def __setitem__(self, index, panes):\n new_objects = list(self)\n if not isinstance(index, slice):\n if index > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (index, type(self).__name__, len(self.objects)))\n start, end = index, index+1\n panes = [panes]\n else:\n start = index.start or 0\n end = len(self.objects) if index.stop is None else index.stop\n if index.start is None and index.stop is None:\n if not isinstance(panes, list):\n raise IndexError('Expected a list of objects to '\n 'replace the objects in the %s, '\n 'got a %s type.' %\n (type(self).__name__, type(panes).__name__))\n expected = len(panes)\n new_objects = [None]*expected\n self._names = [None]*len(panes)\n end = expected\n else:\n expected = end-start\n if end > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n if not isinstance(panes, list) or len(panes) != expected:\n raise IndexError('Expected a list of %d objects to set '\n 'on the %s to match the supplied slice.' %\n (expected, type(self).__name__))\n for i, pane in zip(range(start, end), panes):\n new_objects[i], self._names[i] = self._to_object_and_name(pane)\n self.objects = new_objects\n\n def clone(self, *objects, **params):\n \"\"\"\n Makes a copy of the Tabs sharing the same parameters.\n\n Arguments\n ---------\n objects: Objects to add to the cloned Tabs object.\n params: Keyword arguments override the parameters on the clone.\n\n Returns\n -------\n Cloned Tabs object\n \"\"\"\n if not objects:\n if 'objects' in params:\n objects = params.pop('objects')\n else:\n objects = zip(self._names, self.objects)\n elif 'objects' in params:\n raise ValueError('Tabs objects should be supplied either '\n 'as positional arguments or as a keyword, '\n 'not both.')\n p = dict(self.param.get_param_values(), **params)\n del p['objects']\n return type(self)(*objects, **params)\n\n def append(self, pane):\n \"\"\"\n Appends an object to the tabs.\n\n Arguments\n ---------\n obj (object): Panel component to add as a tab.\n \"\"\"\n new_object, new_name = self._to_object_and_name(pane)\n new_objects = list(self)\n new_objects.append(new_object)\n self._names.append(new_name)\n self.objects = new_objects\n\n def clear(self):\n \"\"\"\n Clears the tabs.\n \"\"\"\n self._names = []\n self.objects = []\n\n def extend(self, panes):\n \"\"\"\n Extends the the tabs with a list.\n\n Arguments\n ---------\n objects (list): List of panel components to add as tabs.\n \"\"\"\n new_objects, new_names = self._to_objects_and_names(panes)\n objects = list(self)\n objects.extend(new_objects)\n self._names.extend(new_names)\n self.objects = objects\n\n def insert(self, index, pane):\n \"\"\"\n Inserts an object in the tabs at the specified index.\n\n Arguments\n ---------\n index (int): Index at which to insert the object.\n object (object): Panel components to insert as tabs.\n \"\"\"\n new_object, new_name = self._to_object_and_name(pane)\n 
new_objects = list(self.objects)\n new_objects.insert(index, new_object)\n self._names.insert(index, new_name)\n self.objects = new_objects\n\n def pop(self, index):\n \"\"\"\n Pops an item from the tabs by index.\n\n Arguments\n ---------\n index (int): The index of the item to pop from the tabs.\n \"\"\"\n new_objects = list(self)\n if index in new_objects:\n index = new_objects.index(index)\n new_objects.pop(index)\n self._names.pop(index)\n self.objects = new_objects\n\n def remove(self, pane):\n \"\"\"\n Removes an object from the tabs.\n\n Arguments\n ---------\n obj (object): The object to remove from the tabs.\n \"\"\"\n new_objects = list(self)\n if pane in new_objects:\n index = new_objects.index(pane)\n new_objects.remove(pane)\n self._names.pop(index)\n self.objects = new_objects\n\n def reverse(self):\n \"\"\"\n Reverses the tabs.\n \"\"\"\n new_objects = list(self)\n new_objects.reverse()\n self._names.reverse()\n self.objects = new_objects\n\n\n\nclass Row(ListPanel):\n \"\"\"\n Horizontal layout of Viewables.\n \"\"\"\n\n col_sizing = param.Parameter()\n\n _bokeh_model = BkRow\n\n _rename = dict(ListPanel._rename, col_sizing='cols')\n\n\nclass Column(ListPanel):\n \"\"\"\n Vertical layout of Viewables.\n \"\"\"\n\n row_sizing = param.Parameter()\n\n _bokeh_model = BkColumn\n\n _rename = dict(ListPanel._rename, row_sizing='rows')\n\n\nclass WidgetBox(ListPanel):\n \"\"\"\n Vertical layout of widgets.\n \"\"\"\n\n css_classes = param.List(default=['widget-box'], doc=\"\"\"\n CSS classes to apply to the layout.\"\"\")\n\n disabled = param.Boolean(default=False, doc=\"\"\"\n Whether the widget is disabled.\"\"\")\n\n horizontal = param.Boolean(default=False, doc=\"\"\"\n Whether to lay out the widgets in a Row layout as opposed \n to a Column layout.\"\"\")\n\n margin = param.Parameter(default=5, doc=\"\"\"\n Allows to create additional space around the component. May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n _source_transforms = {'disabled': None, 'horizontal': None}\n\n _rename = {'objects': 'children', 'horizontal': None}\n\n @property\n def _bokeh_model(self):\n return BkRow if self.horizontal else BkColumn\n\n @param.depends('disabled', 'objects', watch=True)\n def _disable_widgets(self):\n for obj in self:\n if hasattr(obj, 'disabled'):\n obj.disabled = self.disabled\n\n def __init__(self, *objects, **params):\n super(WidgetBox, self).__init__(*objects, **params)\n if self.disabled:\n self._disable_widgets()\n",
"path": "panel/layout/base.py"
}
] | [
{
"content": "\"\"\"\nDefines Layout classes which may be used to arrange panes and widgets\nin flexible ways to build complex dashboards.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nfrom collections import defaultdict, namedtuple\n\nimport param\n\nfrom bokeh.models import Column as BkColumn, Row as BkRow\n\nfrom ..io.model import hold\nfrom ..io.state import state\nfrom ..reactive import Reactive\nfrom ..util import param_name, param_reprs\n\n_row = namedtuple(\"row\", [\"children\"])\n_col = namedtuple(\"col\", [\"children\"])\n\n\nclass Panel(Reactive):\n \"\"\"\n Abstract baseclass for a layout of Viewables.\n \"\"\"\n\n _bokeh_model = None\n\n __abstract = True\n\n _rename = {'objects': 'children'}\n\n _linked_props = []\n\n def __repr__(self, depth=0, max_depth=10):\n if depth > max_depth:\n return '...'\n spacer = '\\n' + (' ' * (depth+1))\n cls = type(self).__name__\n params = param_reprs(self, ['objects'])\n objs = ['[%d] %s' % (i, obj.__repr__(depth+1)) for i, obj in enumerate(self)]\n if not params and not objs:\n return super(Panel, self).__repr__(depth+1)\n elif not params:\n template = '{cls}{spacer}{objs}'\n elif not objs:\n template = '{cls}({params})'\n else:\n template = '{cls}({params}){spacer}{objs}'\n return template.format(\n cls=cls, params=', '.join(params),\n objs=('%s' % spacer).join(objs), spacer=spacer)\n\n #----------------------------------------------------------------\n # Callback API\n #----------------------------------------------------------------\n\n def _update_model(self, events, msg, root, model, doc, comm=None):\n msg = dict(msg)\n if self._rename['objects'] in msg:\n old = events['objects'].old\n msg[self._rename['objects']] = self._get_objects(model, old, doc, root, comm)\n\n with hold(doc):\n super(Panel, self)._update_model(events, msg, root, model, doc, comm)\n from ..io import state\n ref = root.ref['id']\n if ref in state._views:\n state._views[ref][0]._preprocess(root)\n\n #----------------------------------------------------------------\n # Model API\n #----------------------------------------------------------------\n\n def _init_properties(self):\n properties = {k: v for k, v in self.param.get_param_values()\n if v is not None}\n del properties['objects']\n return self._process_param_change(properties)\n\n def _get_objects(self, model, old_objects, doc, root, comm=None):\n \"\"\"\n Returns new child models for the layout while reusing unchanged\n models and cleaning up any dropped objects.\n \"\"\"\n from ..pane.base import panel, RerenderError\n new_models = []\n for i, pane in enumerate(self.objects):\n pane = panel(pane)\n self.objects[i] = pane\n\n for obj in old_objects:\n if obj not in self.objects:\n obj._cleanup(root)\n\n current_objects = list(self.objects)\n for i, pane in enumerate(self.objects):\n if pane in old_objects:\n child, _ = pane._models[root.ref['id']]\n else:\n try:\n child = pane._get_model(doc, root, model, comm)\n except RerenderError:\n return self._get_objects(model, current_objects[:i], doc, root, comm)\n new_models.append(child)\n return new_models\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self._bokeh_model()\n if root is None:\n root = model\n objects = self._get_objects(model, [], doc, root, comm)\n props = dict(self._init_properties(), objects=objects)\n model.update(**self._process_param_change(props))\n self._models[root.ref['id']] = (model, parent)\n self._link_props(model, self._linked_props, doc, root, comm)\n return model\n\n 
#----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def select(self, selector=None):\n \"\"\"\n Iterates over the Viewable and any potential children in the\n applying the Selector.\n\n Arguments\n ---------\n selector: type or callable or None\n The selector allows selecting a subset of Viewables by\n declaring a type or callable function to filter by.\n\n Returns\n -------\n viewables: list(Viewable)\n \"\"\"\n objects = super(Panel, self).select(selector)\n for obj in self:\n objects += obj.select(selector)\n return objects\n\n\n\nclass ListLike(param.Parameterized):\n\n objects = param.List(default=[], doc=\"\"\"\n The list of child objects that make up the layout.\"\"\")\n \n def __getitem__(self, index):\n return self.objects[index]\n\n def __len__(self):\n return len(self.objects)\n\n def __iter__(self):\n for obj in self.objects:\n yield obj\n\n def __iadd__(self, other):\n self.extend(other)\n return self\n\n def __add__(self, other):\n if isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (stype, otype, stype))\n return self.clone(*(self.objects+other))\n\n def __radd__(self, other):\n if isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (otype, stype, stype))\n return self.clone(*(other+self.objects))\n\n def __contains__(self, obj):\n return obj in self.objects\n\n def __setitem__(self, index, panes):\n from ..pane import panel\n new_objects = list(self)\n if not isinstance(index, slice):\n start, end = index, index+1\n if start > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n panes = [panes]\n else:\n start = index.start or 0\n end = len(self) if index.stop is None else index.stop\n if index.start is None and index.stop is None:\n if not isinstance(panes, list):\n raise IndexError('Expected a list of objects to '\n 'replace the objects in the %s, '\n 'got a %s type.' %\n (type(self).__name__, type(panes).__name__))\n expected = len(panes)\n new_objects = [None]*expected\n end = expected\n elif end > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n else:\n expected = end-start\n if not isinstance(panes, list) or len(panes) != expected:\n raise IndexError('Expected a list of %d objects to set '\n 'on the %s to match the supplied slice.' 
%\n (expected, type(self).__name__))\n for i, pane in zip(range(start, end), panes):\n new_objects[i] = panel(pane)\n\n self.objects = new_objects\n\n def clone(self, *objects, **params):\n \"\"\"\n Makes a copy of the layout sharing the same parameters.\n\n Arguments\n ---------\n objects: Objects to add to the cloned layout.\n params: Keyword arguments override the parameters on the clone.\n\n Returns\n -------\n Cloned layout object\n \"\"\"\n if not objects:\n if 'objects' in params:\n objects = params.pop('objects')\n else:\n objects = self.objects\n elif 'objects' in params:\n raise ValueError(\"A %s's objects should be supplied either \"\n \"as arguments or as a keyword, not both.\"\n % type(self).__name__)\n p = dict(self.param.get_param_values(), **params)\n del p['objects']\n return type(self)(*objects, **p)\n\n def append(self, obj):\n \"\"\"\n Appends an object to the layout.\n\n Arguments\n ---------\n obj (object): Panel component to add to the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects\n\n def clear(self):\n \"\"\"\n Clears the objects on this layout.\n \"\"\"\n self.objects = []\n\n def extend(self, objects):\n \"\"\"\n Extends the objects on this layout with a list.\n\n Arguments\n ---------\n objects (list): List of panel components to add to the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.extend(list(map(panel, objects)))\n self.objects = new_objects\n\n def insert(self, index, obj):\n \"\"\"\n Inserts an object in the layout at the specified index.\n\n Arguments\n ---------\n index (int): Index at which to insert the object.\n object (object): Panel components to insert in the layout.\n \"\"\"\n from ..pane import panel\n new_objects = list(self)\n new_objects.insert(index, panel(obj))\n self.objects = new_objects\n\n def pop(self, index):\n \"\"\"\n Pops an item from the layout by index.\n\n Arguments\n ---------\n index (int): The index of the item to pop from the layout.\n \"\"\"\n new_objects = list(self)\n if index in new_objects:\n index = new_objects.index(index)\n obj = new_objects.pop(index)\n self.objects = new_objects\n return obj\n\n def remove(self, obj):\n \"\"\"\n Removes an object from the layout.\n\n Arguments\n ---------\n obj (object): The object to remove from the layout.\n \"\"\"\n new_objects = list(self)\n new_objects.remove(obj)\n self.objects = new_objects\n\n def reverse(self):\n \"\"\"\n Reverses the objects in the layout.\n \"\"\"\n new_objects = list(self)\n new_objects.reverse()\n self.objects = new_objects\n\n \n\nclass ListPanel(ListLike, Panel):\n \"\"\"\n An abstract baseclass for Panel objects with list-like children.\n \"\"\"\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n scroll = param.Boolean(default=False, doc=\"\"\"\n Whether to add scrollbars if the content overflows the size\n of the container.\"\"\")\n\n _source_transforms = {'scroll': None}\n\n __abstract = True\n\n def __init__(self, *objects, **params):\n from ..pane import panel\n if objects:\n if 'objects' in params:\n raise ValueError(\"A %s's objects should be supplied either \"\n \"as positional arguments or as a keyword, \"\n \"not both.\" % type(self).__name__)\n params['objects'] = [panel(pane) for pane in objects]\n super(Panel, self).__init__(**params)\n\n def _process_param_change(self, params):\n scroll = params.pop('scroll', None)\n css_classes = self.css_classes or []\n if scroll:\n params['css_classes'] = css_classes + ['scrollable']\n elif scroll == False:\n params['css_classes'] = css_classes\n return super(ListPanel, self)._process_param_change(params)\n\n def _cleanup(self, root):\n if root.ref['id'] in state._fake_roots:\n state._fake_roots.remove(root.ref['id'])\n super(ListPanel, self)._cleanup(root)\n for p in self.objects:\n p._cleanup(root)\n\n\nclass NamedListPanel(ListPanel):\n\n active = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n Index of the currently displayed objects.\"\"\")\n\n objects = param.List(default=[], doc=\"\"\"\n The list of child objects that make up the tabs.\"\"\")\n\n def __init__(self, *items, **params):\n if 'objects' in params:\n if items:\n raise ValueError('%s objects should be supplied either '\n 'as positional arguments or as a keyword, '\n 'not both.' % type(self).__name__)\n items = params['objects']\n objects, self._names = self._to_objects_and_names(items)\n super(NamedListPanel, self).__init__(*objects, **params)\n self._panels = defaultdict(dict)\n self.param.watch(self._update_names, 'objects')\n # ALERT: Ensure that name update happens first, should be\n # replaced by watch precedence support in param\n self._param_watchers['objects']['value'].reverse()\n\n def _to_object_and_name(self, item):\n from ..pane import panel\n if isinstance(item, tuple):\n name, item = item\n else:\n name = getattr(item, 'name', None)\n pane = panel(item, name=name)\n name = param_name(pane.name) if name is None else name\n return pane, name\n\n def _to_objects_and_names(self, items):\n objects, names = [], []\n for item in items:\n pane, name = self._to_object_and_name(item)\n objects.append(pane)\n names.append(name)\n return objects, names\n\n def _update_names(self, event):\n if len(event.new) == len(self._names):\n return\n names = []\n for obj in event.new:\n if obj in event.old:\n index = event.old.index(obj)\n name = self._names[index]\n else:\n name = obj.name\n names.append(name)\n self._names = names\n\n def _update_active(self, *events):\n pass\n\n #----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def __add__(self, other):\n if isinstance(other, NamedListPanel):\n other = list(zip(other._names, other.objects))\n elif isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (stype, otype, stype))\n objects = list(zip(self._names, self.objects))\n return self.clone(*(objects+other))\n\n def 
__radd__(self, other):\n if isinstance(other, NamedListPanel):\n other = list(zip(other._names, other.objects))\n elif isinstance(other, ListLike):\n other = other.objects\n if not isinstance(other, list):\n stype = type(self).__name__\n otype = type(other).__name__\n raise ValueError(\"Cannot add items of type %s and %s, can only \"\n \"combine %s.objects with list or ListLike object.\"\n % (otype, stype, stype))\n objects = list(zip(self._names, self.objects))\n return self.clone(*(other+objects))\n\n def __setitem__(self, index, panes):\n new_objects = list(self)\n if not isinstance(index, slice):\n if index > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (index, type(self).__name__, len(self.objects)))\n start, end = index, index+1\n panes = [panes]\n else:\n start = index.start or 0\n end = len(self.objects) if index.stop is None else index.stop\n if index.start is None and index.stop is None:\n if not isinstance(panes, list):\n raise IndexError('Expected a list of objects to '\n 'replace the objects in the %s, '\n 'got a %s type.' %\n (type(self).__name__, type(panes).__name__))\n expected = len(panes)\n new_objects = [None]*expected\n self._names = [None]*len(panes)\n end = expected\n else:\n expected = end-start\n if end > len(self.objects):\n raise IndexError('Index %d out of bounds on %s '\n 'containing %d objects.' %\n (end, type(self).__name__, len(self.objects)))\n if not isinstance(panes, list) or len(panes) != expected:\n raise IndexError('Expected a list of %d objects to set '\n 'on the %s to match the supplied slice.' %\n (expected, type(self).__name__))\n for i, pane in zip(range(start, end), panes):\n new_objects[i], self._names[i] = self._to_object_and_name(pane)\n self.objects = new_objects\n\n def clone(self, *objects, **params):\n \"\"\"\n Makes a copy of the Tabs sharing the same parameters.\n\n Arguments\n ---------\n objects: Objects to add to the cloned Tabs object.\n params: Keyword arguments override the parameters on the clone.\n\n Returns\n -------\n Cloned Tabs object\n \"\"\"\n if not objects:\n if 'objects' in params:\n objects = params.pop('objects')\n else:\n objects = zip(self._names, self.objects)\n elif 'objects' in params:\n raise ValueError('Tabs objects should be supplied either '\n 'as positional arguments or as a keyword, '\n 'not both.')\n p = dict(self.param.get_param_values(), **params)\n del p['objects']\n return type(self)(*objects, **params)\n\n def append(self, pane):\n \"\"\"\n Appends an object to the tabs.\n\n Arguments\n ---------\n obj (object): Panel component to add as a tab.\n \"\"\"\n new_object, new_name = self._to_object_and_name(pane)\n new_objects = list(self)\n new_objects.append(new_object)\n self._names.append(new_name)\n self.objects = new_objects\n\n def clear(self):\n \"\"\"\n Clears the tabs.\n \"\"\"\n self._names = []\n self.objects = []\n\n def extend(self, panes):\n \"\"\"\n Extends the the tabs with a list.\n\n Arguments\n ---------\n objects (list): List of panel components to add as tabs.\n \"\"\"\n new_objects, new_names = self._to_objects_and_names(panes)\n objects = list(self)\n objects.extend(new_objects)\n self._names.extend(new_names)\n self.objects = objects\n\n def insert(self, index, pane):\n \"\"\"\n Inserts an object in the tabs at the specified index.\n\n Arguments\n ---------\n index (int): Index at which to insert the object.\n object (object): Panel components to insert as tabs.\n \"\"\"\n new_object, new_name = self._to_object_and_name(pane)\n 
new_objects = list(self.objects)\n new_objects.insert(index, new_object)\n self._names.insert(index, new_name)\n self.objects = new_objects\n\n def pop(self, index):\n \"\"\"\n Pops an item from the tabs by index.\n\n Arguments\n ---------\n index (int): The index of the item to pop from the tabs.\n \"\"\"\n new_objects = list(self)\n if index in new_objects:\n index = new_objects.index(index)\n new_objects.pop(index)\n self._names.pop(index)\n self.objects = new_objects\n\n def remove(self, pane):\n \"\"\"\n Removes an object from the tabs.\n\n Arguments\n ---------\n obj (object): The object to remove from the tabs.\n \"\"\"\n new_objects = list(self)\n if pane in new_objects:\n index = new_objects.index(pane)\n new_objects.remove(pane)\n self._names.pop(index)\n self.objects = new_objects\n\n def reverse(self):\n \"\"\"\n Reverses the tabs.\n \"\"\"\n new_objects = list(self)\n new_objects.reverse()\n self._names.reverse()\n self.objects = new_objects\n\n\n\nclass Row(ListPanel):\n \"\"\"\n Horizontal layout of Viewables.\n \"\"\"\n\n col_sizing = param.Parameter()\n\n _bokeh_model = BkRow\n\n _rename = dict(ListPanel._rename, col_sizing='cols')\n\n\nclass Column(ListPanel):\n \"\"\"\n Vertical layout of Viewables.\n \"\"\"\n\n row_sizing = param.Parameter()\n\n _bokeh_model = BkColumn\n\n _rename = dict(ListPanel._rename, row_sizing='rows')\n\n\nclass WidgetBox(ListPanel):\n \"\"\"\n Vertical layout of widgets.\n \"\"\"\n\n css_classes = param.List(default=['widget-box'], doc=\"\"\"\n CSS classes to apply to the layout.\"\"\")\n\n disabled = param.Boolean(default=False, doc=\"\"\"\n Whether the widget is disabled.\"\"\")\n\n horizontal = param.Boolean(default=False, doc=\"\"\"\n Whether to lay out the widgets in a Row layout as opposed \n to a Column layout.\"\"\")\n\n margin = param.Parameter(default=5, doc=\"\"\"\n Allows to create additional space around the component. May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n _source_transforms = {'disabled': None, 'horizontal': None}\n\n _rename = {'objects': 'children', 'horizontal': None}\n\n @property\n def _bokeh_model(self):\n return BkRow if self.horizontal else BkColumn\n\n @param.depends('disabled', 'objects', watch=True)\n def _disable_widgets(self):\n for obj in self:\n if hasattr(obj, 'disabled'):\n obj.disabled = self.disabled\n\n def __init__(self, *objects, **params):\n super(WidgetBox, self).__init__(*objects, **params)\n if self.disabled:\n self._disable_widgets()\n",
"path": "panel/layout/base.py"
}
] | diff --git a/panel/layout/base.py b/panel/layout/base.py
index 6d79b53b96..0558896638 100644
--- a/panel/layout/base.py
+++ b/panel/layout/base.py
@@ -247,7 +247,7 @@ def clone(self, *objects, **params):
% type(self).__name__)
p = dict(self.param.get_param_values(), **params)
del p['objects']
- return type(self)(*objects, **params)
+ return type(self)(*objects, **p)
def append(self, obj):
"""
diff --git a/panel/tests/layout/test_base.py b/panel/tests/layout/test_base.py
index 9a104928ed..7c74365603 100644
--- a/panel/tests/layout/test_base.py
+++ b/panel/tests/layout/test_base.py
@@ -402,6 +402,46 @@ def test_layout_clone_kwargs(panel):
assert clone.sizing_mode == 'stretch_height'
+@pytest.mark.parametrize('panel', [Column, Row])
+def test_layout_clone_no_args_no_kwargs(panel):
+ div1 = Div()
+ div2 = Div()
+ layout = panel(div1, div2, width=400, sizing_mode='stretch_height')
+ clone = layout.clone()
+
+ assert layout.objects[0].object is clone.objects[0].object
+ assert layout.objects[1].object is clone.objects[1].object
+
+ assert clone.width == 400
+ assert clone.sizing_mode == 'stretch_height'
+
+
+@pytest.mark.parametrize('panel', [Column, Row])
+def test_layout_clone_objects_in_kwargs(panel):
+ div1 = Div()
+ div2 = Div()
+ layout = panel(div1, div2)
+ clone = layout.clone(
+ objects=(div2, div1),
+ width=400, sizing_mode='stretch_height'
+ )
+
+ assert layout.objects[0].object is clone.objects[1].object
+ assert layout.objects[1].object is clone.objects[0].object
+
+ assert clone.width == 400
+ assert clone.sizing_mode == 'stretch_height'
+
+
+@pytest.mark.parametrize('panel', [Column, Row])
+def test_layout_clone_objects_in_args_and_kwargs(panel):
+ div1 = Div()
+ div2 = Div()
+ layout = panel(div1, div2)
+ with pytest.raises(ValueError):
+ layout.clone(div1, objects=div1)
+
+
def test_widgetbox(document, comm):
widget_box = WidgetBox("WidgetBox")
|
hydroshare__hydroshare-4798 | Change Mezzanine Form to Disallow Username Changes
**Describe the feature you'd like and what it will do**
The internal Mezzanine form for the admin account should, if possible, be altered to no longer allow username changes, since changing a username breaks the resource.
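For illustration, here is a minimal sketch of one way to do this in a Django admin registration, assuming the stock `django.contrib.auth` `UserAdmin` is being customized; it mirrors the `readonly_fields` approach taken in the fix shown below.

```python
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User

# Mark username as read-only so the admin change form still displays it
# but no longer allows it to be edited.
UserAdmin.readonly_fields = ('username',)

admin.site.unregister(User)
admin.site.register(User, UserAdmin)
```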
**Why is this feature important?**
Moving forward, the policy will be that usernames cannot be changed.
I will work with Scott to see if this change is feasible.
| [
{
"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\nUserAdmin.list_display = [\n 'username', 'email', 'first_name', 'last_name', 'is_staff',\n 'is_active', 'date_joined', 'last_login'\n]\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(GenericResource)\n",
"path": "hs_core/admin.py"
}
] | [
{
"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.readonly_fields = ('username',)\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\nUserAdmin.list_display = [\n 'username', 'email', 'first_name', 'last_name', 'is_staff',\n 'is_active', 'date_joined', 'last_login'\n]\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(GenericResource)\n",
"path": "hs_core/admin.py"
}
] | diff --git a/hs_core/admin.py b/hs_core/admin.py
index c46ed5c40a..ad38afaaae 100755
--- a/hs_core/admin.py
+++ b/hs_core/admin.py
@@ -14,6 +14,7 @@ def __init__(self, *args, **kwargs):
self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
UserAdmin.add_form = UserCreationFormExtended
+UserAdmin.readonly_fields = ('username',)
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
|
Chia-Network__chia-blockchain-15508 | [Bug] Module `chia.wallet.puzzles.clawback` not found
### What happened?
When installing `1.8.2-rc3` or `master` via `pip`, the module `chia.wallet.puzzles.clawback` is missing. The files are not included because the packages are not listed in `setup.py`. This is also true of the `prefarm` sibling package.
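For illustration, a hypothetical, heavily abbreviated `setup.py` fragment showing the kind of entries involved: because the project keeps a hand-maintained `packages=` list, any sub-package not named there is silently left out of the built distribution. The actual fix in the diff below adds exactly these two entries.

```python
from setuptools import setup

setup(
    name="chia-blockchain",
    packages=[
        # ...the real list names every sub-package explicitly...
        "chia.wallet.puzzles",
        "chia.wallet.puzzles.clawback",  # missing in 1.8.2-rc3
        "chia.wallet.puzzles.prefarm",   # also missing
    ],
)
```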
### Version
1.8.2-rc3
### What platform are you using?
Linux
### What ui mode are you using?
CLI
### Relevant log output
```shell
$ pip install git+https://github.com/chia-network/chia-blockchain
Collecting git+https://github.com/chia-network/chia-blockchain
Cloning https://github.com/chia-network/chia-blockchain to /tmp/pip-req-build-m26feywu
Running command git clone --filter=blob:none --quiet https://github.com/chia-network/chia-blockchain /tmp/pip-req-build-m26feywu
Resolved https://github.com/chia-network/chia-blockchain to commit 49140b2b3c0c128f2464c0b4e50c496e7029939d
Running command git submodule update --init --recursive -q
[snip]
$ python3
>>> import chia.wallet.wallet
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.11/site-packages/chia/wallet/wallet.py", line 23, in <module>
from chia.wallet.coin_selection import select_coins
File "/usr/local/lib/python3.11/site-packages/chia/wallet/coin_selection.py", line 10, in <module>
from chia.wallet.wallet_coin_record import WalletCoinRecord
File "/usr/local/lib/python3.11/site-packages/chia/wallet/wallet_coin_record.py", line 11, in <module>
from chia.wallet.puzzles.clawback.metadata import ClawbackMetadata, ClawbackVersion
ModuleNotFoundError: No module named 'chia.wallet.puzzles.clawback'
```
| [
{
"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\ndependencies = [\n \"aiofiles==23.1.0\", # Async IO for files\n \"anyio==3.6.2\",\n \"boto3==1.26.148\", # AWS S3 for DL s3 plugin\n \"blspy==1.0.16\", # Signature library\n \"chiavdf==1.0.8\", # timelord and vdf verification\n \"chiabip158==1.2\", # bip158-style wallet filters\n \"chiapos==1.0.11\", # proof of space\n \"clvm==0.9.7\",\n \"clvm_tools==0.4.6\", # Currying, Program.to, other conveniences\n \"chia_rs==0.2.7\",\n \"clvm-tools-rs==0.1.34\", # Rust implementation of clvm_tools' compiler\n \"aiohttp==3.8.4\", # HTTP server for full node rpc\n \"aiosqlite==0.19.0\", # asyncio wrapper for sqlite, to store blocks\n \"bitstring==4.0.2\", # Binary data management library\n \"colorama==0.4.6\", # Colorizes terminal output\n \"colorlog==6.7.0\", # Adds color to logs\n \"concurrent-log-handler==0.9.24\", # Concurrently log and rotate logs\n \"cryptography==41.0.0\", # Python cryptography library for TLS - keyring conflict\n \"filelock==3.12.0\", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)\n \"keyring==23.13.1\", # Store keys in MacOS Keychain, Windows Credential Locker\n \"PyYAML==6.0\", # Used for config file format\n \"setproctitle==1.3.2\", # Gives the chia processes readable names\n \"sortedcontainers==2.4.0\", # For maintaining sorted mempools\n \"click==8.1.3\", # For the CLI\n \"dnspython==2.3.0\", # Query DNS seeds\n \"watchdog==2.2.0\", # Filesystem event watching - watches keyring.yaml\n \"dnslib==0.9.23\", # dns lib\n \"typing-extensions==4.6.0\", # typing backports like Protocol and TypedDict\n \"zstd==1.5.5.1\",\n \"packaging==23.1\",\n \"psutil==5.9.4\",\n]\n\nupnp_dependencies = [\n \"miniupnpc==2.2.2\", # Allows users to open ports on their router\n]\n\ndev_dependencies = [\n \"build\",\n # >=7.2.4 for https://github.com/nedbat/coveragepy/issues/1604\n \"coverage>=7.2.4\",\n \"diff-cover\",\n \"pre-commit\",\n \"py3createtorrent\",\n \"pylint\",\n \"pytest\",\n \"pytest-asyncio>=0.18.1\", # require attribute 'fixture'\n \"pytest-cov\",\n \"pytest-monitor; sys_platform == 'linux'\",\n \"pytest-xdist\",\n \"twine\",\n \"isort\",\n \"flake8\",\n \"mypy\",\n \"black==23.3.0\",\n \"aiohttp_cors\", # For blackd\n \"ipython\", # For asyncio debugging\n \"pyinstaller==5.11.0\",\n \"types-aiofiles\",\n \"types-cryptography\",\n \"types-pkg_resources\",\n \"types-pyyaml\",\n \"types-setuptools\",\n]\n\nlegacy_keyring_dependencies = [\n \"keyrings.cryptfile==1.3.9\",\n]\n\nkwargs = dict(\n name=\"chia-blockchain\",\n author=\"Mariano Sorgente\",\n author_email=\"[email protected]\",\n description=\"Chia blockchain full node, farmer, timelord, and wallet.\",\n url=\"https://chia.net/\",\n license=\"Apache License\",\n python_requires=\">=3.7, <4\",\n keywords=\"chia blockchain node\",\n install_requires=dependencies,\n extras_require=dict(\n dev=dev_dependencies,\n upnp=upnp_dependencies,\n legacy_keyring=legacy_keyring_dependencies,\n ),\n packages=[\n \"build_scripts\",\n \"chia\",\n \"chia.cmds\",\n \"chia.clvm\",\n \"chia.consensus\",\n \"chia.daemon\",\n \"chia.data_layer\",\n \"chia.full_node\",\n \"chia.timelord\",\n \"chia.farmer\",\n \"chia.harvester\",\n \"chia.introducer\",\n \"chia.plot_sync\",\n \"chia.plotters\",\n \"chia.plotting\",\n \"chia.pools\",\n \"chia.protocols\",\n \"chia.rpc\",\n \"chia.seeder\",\n \"chia.server\",\n \"chia.simulator\",\n \"chia.types.blockchain_format\",\n \"chia.types\",\n \"chia.util\",\n 
\"chia.wallet\",\n \"chia.wallet.db_wallet\",\n \"chia.wallet.puzzles\",\n \"chia.wallet.cat_wallet\",\n \"chia.wallet.did_wallet\",\n \"chia.wallet.nft_wallet\",\n \"chia.wallet.trading\",\n \"chia.wallet.util\",\n \"chia.wallet.vc_wallet\",\n \"chia.wallet.vc_wallet.vc_puzzles\",\n \"chia.wallet.vc_wallet.cr_puzzles\",\n \"chia.ssl\",\n \"mozilla-ca\",\n ],\n entry_points={\n \"console_scripts\": [\n \"chia = chia.cmds.chia:main\",\n \"chia_daemon = chia.daemon.server:main\",\n \"chia_wallet = chia.server.start_wallet:main\",\n \"chia_full_node = chia.server.start_full_node:main\",\n \"chia_harvester = chia.server.start_harvester:main\",\n \"chia_farmer = chia.server.start_farmer:main\",\n \"chia_introducer = chia.server.start_introducer:main\",\n \"chia_crawler = chia.seeder.start_crawler:main\",\n \"chia_seeder = chia.seeder.dns_server:main\",\n \"chia_timelord = chia.server.start_timelord:main\",\n \"chia_timelord_launcher = chia.timelord.timelord_launcher:main\",\n \"chia_full_node_simulator = chia.simulator.start_simulator:main\",\n \"chia_data_layer = chia.server.start_data_layer:main\",\n \"chia_data_layer_http = chia.data_layer.data_layer_server:main\",\n \"chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server\",\n ]\n },\n package_data={\n \"chia\": [\"pyinstaller.spec\"],\n \"\": [\"*.clsp\", \"*.clsp.hex\", \"*.clvm\", \"*.clib\", \"py.typed\"],\n \"chia.util\": [\"initial-*.yaml\", \"english.txt\"],\n \"chia.ssl\": [\"chia_ca.crt\", \"chia_ca.key\", \"dst_root_ca.pem\"],\n \"mozilla-ca\": [\"cacert.pem\"],\n },\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n zip_safe=False,\n project_urls={\n \"Source\": \"https://github.com/Chia-Network/chia-blockchain/\",\n \"Changelog\": \"https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md\",\n },\n)\n\nif \"setup_file\" in sys.modules:\n # include dev deps in regular deps when run in snyk\n dependencies.extend(dev_dependencies)\n\nif len(os.environ.get(\"CHIA_SKIP_SETUP\", \"\")) < 1:\n setup(**kwargs) # type: ignore\n",
"path": "setup.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\ndependencies = [\n \"aiofiles==23.1.0\", # Async IO for files\n \"anyio==3.6.2\",\n \"boto3==1.26.148\", # AWS S3 for DL s3 plugin\n \"blspy==1.0.16\", # Signature library\n \"chiavdf==1.0.8\", # timelord and vdf verification\n \"chiabip158==1.2\", # bip158-style wallet filters\n \"chiapos==1.0.11\", # proof of space\n \"clvm==0.9.7\",\n \"clvm_tools==0.4.6\", # Currying, Program.to, other conveniences\n \"chia_rs==0.2.7\",\n \"clvm-tools-rs==0.1.34\", # Rust implementation of clvm_tools' compiler\n \"aiohttp==3.8.4\", # HTTP server for full node rpc\n \"aiosqlite==0.19.0\", # asyncio wrapper for sqlite, to store blocks\n \"bitstring==4.0.2\", # Binary data management library\n \"colorama==0.4.6\", # Colorizes terminal output\n \"colorlog==6.7.0\", # Adds color to logs\n \"concurrent-log-handler==0.9.24\", # Concurrently log and rotate logs\n \"cryptography==41.0.0\", # Python cryptography library for TLS - keyring conflict\n \"filelock==3.12.0\", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)\n \"keyring==23.13.1\", # Store keys in MacOS Keychain, Windows Credential Locker\n \"PyYAML==6.0\", # Used for config file format\n \"setproctitle==1.3.2\", # Gives the chia processes readable names\n \"sortedcontainers==2.4.0\", # For maintaining sorted mempools\n \"click==8.1.3\", # For the CLI\n \"dnspython==2.3.0\", # Query DNS seeds\n \"watchdog==2.2.0\", # Filesystem event watching - watches keyring.yaml\n \"dnslib==0.9.23\", # dns lib\n \"typing-extensions==4.6.0\", # typing backports like Protocol and TypedDict\n \"zstd==1.5.5.1\",\n \"packaging==23.1\",\n \"psutil==5.9.4\",\n]\n\nupnp_dependencies = [\n \"miniupnpc==2.2.2\", # Allows users to open ports on their router\n]\n\ndev_dependencies = [\n \"build\",\n # >=7.2.4 for https://github.com/nedbat/coveragepy/issues/1604\n \"coverage>=7.2.4\",\n \"diff-cover\",\n \"pre-commit\",\n \"py3createtorrent\",\n \"pylint\",\n \"pytest\",\n \"pytest-asyncio>=0.18.1\", # require attribute 'fixture'\n \"pytest-cov\",\n \"pytest-monitor; sys_platform == 'linux'\",\n \"pytest-xdist\",\n \"twine\",\n \"isort\",\n \"flake8\",\n \"mypy\",\n \"black==23.3.0\",\n \"aiohttp_cors\", # For blackd\n \"ipython\", # For asyncio debugging\n \"pyinstaller==5.11.0\",\n \"types-aiofiles\",\n \"types-cryptography\",\n \"types-pkg_resources\",\n \"types-pyyaml\",\n \"types-setuptools\",\n]\n\nlegacy_keyring_dependencies = [\n \"keyrings.cryptfile==1.3.9\",\n]\n\nkwargs = dict(\n name=\"chia-blockchain\",\n author=\"Mariano Sorgente\",\n author_email=\"[email protected]\",\n description=\"Chia blockchain full node, farmer, timelord, and wallet.\",\n url=\"https://chia.net/\",\n license=\"Apache License\",\n python_requires=\">=3.7, <4\",\n keywords=\"chia blockchain node\",\n install_requires=dependencies,\n extras_require=dict(\n dev=dev_dependencies,\n upnp=upnp_dependencies,\n legacy_keyring=legacy_keyring_dependencies,\n ),\n packages=[\n \"build_scripts\",\n \"chia\",\n \"chia.cmds\",\n \"chia.clvm\",\n \"chia.consensus\",\n \"chia.daemon\",\n \"chia.data_layer\",\n \"chia.full_node\",\n \"chia.timelord\",\n \"chia.farmer\",\n \"chia.harvester\",\n \"chia.introducer\",\n \"chia.plot_sync\",\n \"chia.plotters\",\n \"chia.plotting\",\n \"chia.pools\",\n \"chia.protocols\",\n \"chia.rpc\",\n \"chia.seeder\",\n \"chia.server\",\n \"chia.simulator\",\n \"chia.types.blockchain_format\",\n \"chia.types\",\n \"chia.util\",\n 
\"chia.wallet\",\n \"chia.wallet.db_wallet\",\n \"chia.wallet.puzzles\",\n \"chia.wallet.puzzles.clawback\",\n \"chia.wallet.puzzles.prefarm\",\n \"chia.wallet.cat_wallet\",\n \"chia.wallet.did_wallet\",\n \"chia.wallet.nft_wallet\",\n \"chia.wallet.trading\",\n \"chia.wallet.util\",\n \"chia.wallet.vc_wallet\",\n \"chia.wallet.vc_wallet.vc_puzzles\",\n \"chia.wallet.vc_wallet.cr_puzzles\",\n \"chia.ssl\",\n \"mozilla-ca\",\n ],\n entry_points={\n \"console_scripts\": [\n \"chia = chia.cmds.chia:main\",\n \"chia_daemon = chia.daemon.server:main\",\n \"chia_wallet = chia.server.start_wallet:main\",\n \"chia_full_node = chia.server.start_full_node:main\",\n \"chia_harvester = chia.server.start_harvester:main\",\n \"chia_farmer = chia.server.start_farmer:main\",\n \"chia_introducer = chia.server.start_introducer:main\",\n \"chia_crawler = chia.seeder.start_crawler:main\",\n \"chia_seeder = chia.seeder.dns_server:main\",\n \"chia_timelord = chia.server.start_timelord:main\",\n \"chia_timelord_launcher = chia.timelord.timelord_launcher:main\",\n \"chia_full_node_simulator = chia.simulator.start_simulator:main\",\n \"chia_data_layer = chia.server.start_data_layer:main\",\n \"chia_data_layer_http = chia.data_layer.data_layer_server:main\",\n \"chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server\",\n ]\n },\n package_data={\n \"chia\": [\"pyinstaller.spec\"],\n \"\": [\"*.clsp\", \"*.clsp.hex\", \"*.clvm\", \"*.clib\", \"py.typed\"],\n \"chia.util\": [\"initial-*.yaml\", \"english.txt\"],\n \"chia.ssl\": [\"chia_ca.crt\", \"chia_ca.key\", \"dst_root_ca.pem\"],\n \"mozilla-ca\": [\"cacert.pem\"],\n },\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n zip_safe=False,\n project_urls={\n \"Source\": \"https://github.com/Chia-Network/chia-blockchain/\",\n \"Changelog\": \"https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md\",\n },\n)\n\nif \"setup_file\" in sys.modules:\n # include dev deps in regular deps when run in snyk\n dependencies.extend(dev_dependencies)\n\nif len(os.environ.get(\"CHIA_SKIP_SETUP\", \"\")) < 1:\n setup(**kwargs) # type: ignore\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index f46b99d318ad..98339d93856f 100644
--- a/setup.py
+++ b/setup.py
@@ -118,6 +118,8 @@
"chia.wallet",
"chia.wallet.db_wallet",
"chia.wallet.puzzles",
+ "chia.wallet.puzzles.clawback",
+ "chia.wallet.puzzles.prefarm",
"chia.wallet.cat_wallet",
"chia.wallet.did_wallet",
"chia.wallet.nft_wallet",
|
pulp__pulpcore-2245 | PulpImporter assumes tempfiles can always go to /tmp
This issue is a copy of https://pulp.plan.io/issues/8610, to allow us to backport the fix from core/3.17 into 14/15/16 correctly.
**Version**
core/3.14+
**Describe the bug**
importer.pulp_import uses tempfile.TemporaryDirectory() in places like this:
https://github.com/pulp/pulpcore/blob/master/pulpcore/app/tasks/importer.py#L118
If your /tmp is small and your export is large, unpacking there can run /tmp out of space and cause the import to fail.
We should perhaps set dir= to the worker's working directory?
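A minimal sketch of that suggestion, assuming the task already runs with the worker's working directory as its current directory (which is why the fix in the diff below simply passes `dir="."`):

```python
import tempfile

# Create the scratch directory under the current working directory (the task
# worker's working directory) instead of the system default /tmp.
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
    ...  # extract the export tarball into temp_dir here
```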
| [
{
"content": "import hashlib\nimport json\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport tarfile\nfrom gettext import gettext as _\nfrom logging import getLogger\n\nfrom django.core.files.storage import default_storage\nfrom django.db.models import F\n\nfrom pkg_resources import DistributionNotFound, get_distribution\nfrom rest_framework.serializers import ValidationError\nfrom tablib import Dataset\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models import (\n Artifact,\n Content,\n CreatedResource,\n GroupProgressReport,\n ProgressReport,\n PulpImport,\n PulpImporter,\n Repository,\n Task,\n TaskGroup,\n)\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n)\nfrom pulpcore.constants import TASK_STATES\nfrom pulpcore.tasking.tasks import dispatch\n\nlog = getLogger(__name__)\n\nARTIFACT_FILE = \"pulpcore.app.modelresource.ArtifactResource.json\"\nREPO_FILE = \"pulpcore.app.modelresource.RepositoryResource.json\"\nCONTENT_FILE = \"pulpcore.app.modelresource.ContentResource.json\"\nCA_FILE = \"pulpcore.app.modelresource.ContentArtifactResource.json\"\nVERSIONS_FILE = \"versions.json\"\nCONTENT_MAPPING_FILE = \"content_mapping.json\"\n\n\ndef _destination_repo(importer, source_repo_name):\n \"\"\"Find the destination repository based on source repo's name.\"\"\"\n if importer.repo_mapping and importer.repo_mapping.get(source_repo_name):\n dest_repo_name = importer.repo_mapping[source_repo_name]\n else:\n dest_repo_name = source_repo_name\n return Repository.objects.get(name=dest_repo_name)\n\n\ndef _import_file(fpath, resource_class, do_raise=True):\n try:\n log.info(_(\"Importing file {}.\").format(fpath))\n with open(fpath, \"r\") as json_file:\n data = Dataset().load(json_file, format=\"json\")\n resource = resource_class()\n log.info(_(\"...Importing resource {}.\").format(resource.__class__.__name__))\n return resource.import_data(data, raise_errors=do_raise)\n except AttributeError:\n log.error(_(\"FAILURE importing file {}!\").format(fpath))\n raise\n\n\ndef _check_versions(version_json):\n \"\"\"Compare the export version_json to the installed components.\"\"\"\n error_messages = []\n for component in version_json:\n try:\n version = get_distribution(component[\"component\"]).version\n except DistributionNotFound:\n error_messages.append(\n _(\"Export uses {} which is not installed.\").format(component[\"component\"])\n )\n else:\n if version != component[\"version\"]:\n error_messages.append(\n _(\n \"Export version {export_ver} of {component} does not match \"\n \"installed version {ver}.\"\n ).format(\n export_ver=component[\"version\"],\n component=component[\"component\"],\n ver=version,\n )\n )\n\n if error_messages:\n raise ValidationError((\" \".join(error_messages)))\n\n\ndef import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):\n \"\"\"\n Import a repository version from a Pulp export.\n\n Args:\n importer_pk (str): Importer we are working with\n destination_repo_pk (str): Primary key of Repository to import into.\n source_repo_name (str): Name of the Repository in the export.\n tar_path (str): A path to export tar.\n \"\"\"\n dest_repo = Repository.objects.get(pk=destination_repo_pk)\n importer = PulpImporter.objects.get(pk=importer_pk)\n\n pb = ProgressReport(\n message=f\"Importing content for {dest_repo.name}\",\n code=\"import.repo.version.content\",\n state=TASK_STATES.RUNNING,\n )\n pb.save()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n # 
Extract the repo file for the repo info\n with tarfile.open(tar_path, \"r:gz\") as tar:\n tar.extract(REPO_FILE, path=temp_dir)\n\n with open(os.path.join(temp_dir, REPO_FILE), \"r\") as repo_data_file:\n data = json.load(repo_data_file)\n\n src_repo = next(repo for repo in data if repo[\"name\"] == source_repo_name)\n\n if dest_repo.pulp_type != src_repo[\"pulp_type\"]:\n raise ValidationError(\n _(\n \"Repository type mismatch: {src_repo} ({src_type}) vs {dest_repo} \"\n \"({dest_type}).\"\n ).format(\n src_repo=src_repo[\"name\"],\n src_type=src_repo[\"pulp_type\"],\n dest_repo=dest_repo.name,\n dest_type=dest_repo.pulp_type,\n )\n )\n\n rv_name = \"\"\n # Extract the repo version files\n with tarfile.open(tar_path, \"r:gz\") as tar:\n for mem in tar.getmembers():\n match = re.search(rf\"(^repository-{source_repo_name}_[0-9]+)/.+\", mem.name)\n if match:\n rv_name = match.group(1)\n tar.extract(mem, path=temp_dir)\n\n if not rv_name:\n raise ValidationError(_(\"No RepositoryVersion found for {}\").format(rv_name))\n\n rv_path = os.path.join(temp_dir, rv_name)\n # Content\n plugin_name = src_repo[\"pulp_type\"].split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n\n resulting_content_ids = []\n for res_class in cfg.exportable_classes:\n filename = f\"{res_class.__module__}.{res_class.__name__}.json\"\n a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)\n # django import-export can have a problem with concurrent-imports that are\n # importing the same 'thing' (e.g., a Package that exists in two different\n # repo-versions that are being imported at the same time). We will try an import\n # that will simply record errors as they happen (rather than failing with an exception)\n # first. If errors happen, we'll do one retry before we give up on this repo-version's\n # import.\n if a_result.has_errors():\n log.info(\n _(\"...{} import-errors encountered importing {} from {}, retrying\").format(\n a_result.totals[\"error\"], filename, rv_name\n )\n )\n # Second attempt, we allow to raise an exception on any problem.\n # This will either succeed, or log a fatal error and fail.\n try:\n a_result = _import_file(os.path.join(rv_path, filename), res_class)\n except Exception as e: # noqa log on ANY exception and then re-raise\n log.error(\n _(\"FATAL import-failure importing {} from {}\").format(filename, rv_name)\n )\n raise\n\n resulting_content_ids.extend(\n row.object_id for row in a_result.rows if row.import_type in (\"new\", \"update\")\n )\n\n # Once all content exists, create the ContentArtifact links\n ca_path = os.path.join(rv_path, CA_FILE)\n _import_file(ca_path, ContentArtifactResource)\n\n # see if we have a content mapping\n mapping_path = f\"{rv_name}/{CONTENT_MAPPING_FILE}\"\n mapping = {}\n with tarfile.open(tar_path, \"r:gz\") as tar:\n if mapping_path in tar.getnames():\n tar.extract(mapping_path, path=temp_dir)\n with open(os.path.join(temp_dir, mapping_path), \"r\") as mapping_file:\n mapping = json.load(mapping_file)\n\n if mapping:\n # use the content mapping to map content to repos\n for repo_name, content_ids in mapping.items():\n repo = _destination_repo(importer, repo_name)\n content = Content.objects.filter(upstream_id__in=content_ids)\n with repo.new_version() as new_version:\n new_version.set_content(content)\n else:\n # just map all the content to our destination repo\n content = Content.objects.filter(pk__in=resulting_content_ids)\n with dest_repo.new_version() as new_version:\n new_version.set_content(content)\n\n content_count = 
content.count()\n pb.total = content_count\n pb.done = content_count\n pb.state = TASK_STATES.COMPLETED\n pb.save()\n\n gpr = TaskGroup.current().group_progress_reports.filter(code=\"import.repo.versions\")\n gpr.update(done=F(\"done\") + 1)\n\n\ndef pulp_import(importer_pk, path, toc):\n \"\"\"\n Import a Pulp export into Pulp.\n\n Args:\n importer_pk (str): Primary key of PulpImporter to do the import\n path (str): Path to the export to be imported\n \"\"\"\n\n def _compute_hash(filename):\n sha256_hash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n # Read and update hash string value in blocks of 4K\n for byte_block in iter(lambda: f.read(4096), b\"\"):\n sha256_hash.update(byte_block)\n return sha256_hash.hexdigest()\n\n def validate_toc(toc_filename):\n \"\"\"\n Check validity of table-of-contents file.\n\n table-of-contents must:\n * exist\n * be valid JSON\n * point to chunked-export-files that exist 'next to' the 'toc' file\n * point to chunks whose checksums match the checksums stored in the 'toc' file\n\n Args:\n toc_filename (str): The user-provided toc-file-path to be validated.\n\n Raises:\n ValidationError: If toc is not a valid JSON table-of-contents file,\n or when toc points to chunked-export-files that can't be found in the same\n directory as the toc-file, or the checksums of the chunks do not match the\n checksums stored in toc.\n \"\"\"\n with open(toc_filename) as json_file:\n # Valid JSON?\n the_toc = json.load(json_file)\n if not the_toc.get(\"files\", None) or not the_toc.get(\"meta\", None):\n raise ValidationError(_(\"Missing 'files' or 'meta' keys in table-of-contents!\"))\n\n base_dir = os.path.dirname(toc_filename)\n # Points at chunks that exist?\n missing_files = []\n for f in sorted(the_toc[\"files\"].keys()):\n if not os.path.isfile(os.path.join(base_dir, f)):\n missing_files.append(f)\n if missing_files:\n raise ValidationError(\n _(\n \"Missing import-chunks named in table-of-contents: {}.\".format(\n str(missing_files)\n )\n )\n )\n\n errs = []\n # validate the sha256 of the toc-entries\n # gather errors for reporting at the end\n chunks = sorted(the_toc[\"files\"].keys())\n data = dict(message=\"Validating Chunks\", code=\"validate.chunks\", total=len(chunks))\n with ProgressReport(**data) as pb:\n for chunk in pb.iter(chunks):\n a_hash = _compute_hash(os.path.join(base_dir, chunk))\n if not a_hash == the_toc[\"files\"][chunk]:\n err_str = \"File {} expected checksum : {}, computed checksum : {}\".format(\n chunk, the_toc[\"files\"][chunk], a_hash\n )\n errs.append(err_str)\n\n # if there are any errors, report and fail\n if errs:\n raise ValidationError(_(\"Import chunk hash mismatch: {}).\").format(str(errs)))\n\n return the_toc\n\n def validate_and_assemble(toc_filename):\n \"\"\"Validate checksums of, and reassemble, chunks in table-of-contents file.\"\"\"\n the_toc = validate_toc(toc_filename)\n toc_dir = os.path.dirname(toc_filename)\n result_file = os.path.join(toc_dir, the_toc[\"meta\"][\"file\"])\n\n # if we have only one entry in \"files\", it must be the full .tar.gz - return it\n if len(the_toc[\"files\"]) == 1:\n return os.path.join(toc_dir, list(the_toc[\"files\"].keys())[0])\n\n # We have multiple chunks.\n # reassemble into one file 'next to' the toc and return the resulting full-path\n chunk_size = int(the_toc[\"meta\"][\"chunk_size\"])\n offset = 0\n block_size = 1024\n blocks_per_chunk = int(chunk_size / block_size)\n\n # sorting-by-filename is REALLY IMPORTANT here\n # keys are of the form 
<base-export-name>.00..<base-export-name>.NN,\n # and must be reassembled IN ORDER\n the_chunk_files = sorted(the_toc[\"files\"].keys())\n\n data = dict(\n message=\"Recombining Chunks\", code=\"recombine.chunks\", total=len(the_chunk_files)\n )\n with ProgressReport(**data) as pb:\n for chunk in pb.iter(the_chunk_files):\n # For each chunk, add it to the reconstituted tar.gz, picking up where the previous\n # chunk left off\n subprocess.run(\n [\n \"dd\",\n \"if={}\".format(os.path.join(toc_dir, chunk)),\n \"of={}\".format(result_file),\n \"bs={}\".format(str(block_size)),\n \"seek={}\".format(str(offset)),\n ],\n )\n offset += blocks_per_chunk\n # To keep from taking up All The Disk, we delete each chunk after it has been added\n # to the recombined file.\n try:\n subprocess.run([\"rm\", \"-f\", os.path.join(toc_dir, chunk)])\n except OSError:\n log.warning(\n _(\"Failed to remove chunk {} after recombining. Continuing.\").format(\n os.path.join(toc_dir, chunk)\n ),\n exc_info=True,\n )\n\n combined_hash = _compute_hash(result_file)\n if combined_hash != the_toc[\"meta\"][\"global_hash\"]:\n raise ValidationError(\n _(\"Mismatch between combined .tar.gz checksum [{}] and originating [{}]).\").format(\n combined_hash, the_toc[\"meta\"][\"global_hash\"]\n )\n )\n # if we get this far, then: the chunk-files all existed, they all pass checksum validation,\n # and there exists a combined .tar.gz, which *also* passes checksum-validation.\n # Let the rest of the import process do its thing on the new combined-file.\n return result_file\n\n if toc:\n log.info(_(\"Validating TOC {}.\").format(toc))\n path = validate_and_assemble(toc)\n\n log.info(_(\"Importing {}.\").format(path))\n current_task = Task.current()\n task_group = TaskGroup.current()\n importer = PulpImporter.objects.get(pk=importer_pk)\n the_import = PulpImport.objects.create(\n importer=importer, task=current_task, params={\"path\": path}\n )\n CreatedResource.objects.create(content_object=the_import)\n\n with tempfile.TemporaryDirectory() as temp_dir:\n with tarfile.open(path, \"r:gz\") as tar:\n tar.extractall(path=temp_dir)\n\n # Check version info\n with open(os.path.join(temp_dir, VERSIONS_FILE)) as version_file:\n version_json = json.load(version_file)\n _check_versions(version_json)\n\n # Artifacts\n ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)\n data = dict(\n message=\"Importing Artifacts\", code=\"import.artifacts\", total=len(ar_result.rows)\n )\n with ProgressReport(**data) as pb:\n for row in pb.iter(ar_result.rows):\n artifact = Artifact.objects.get(pk=row.object_id)\n base_path = os.path.join(\"artifact\", artifact.sha256[0:2], artifact.sha256[2:])\n src = os.path.join(temp_dir, base_path)\n\n if not default_storage.exists(base_path):\n with open(src, \"rb\") as f:\n default_storage.save(base_path, f)\n\n with open(os.path.join(temp_dir, REPO_FILE), \"r\") as repo_data_file:\n data = json.load(repo_data_file)\n gpr = GroupProgressReport(\n message=\"Importing repository versions\",\n code=\"import.repo.versions\",\n total=len(data),\n done=0,\n task_group=task_group,\n )\n gpr.save()\n\n for src_repo in data:\n try:\n dest_repo = _destination_repo(importer, src_repo[\"name\"])\n except Repository.DoesNotExist:\n log.warning(\n _(\"Could not find destination repo for {}. 
Skipping.\").format(\n src_repo[\"name\"]\n )\n )\n continue\n\n dispatch(\n import_repository_version,\n exclusive_resources=[dest_repo],\n args=[importer.pk, dest_repo.pk, src_repo[\"name\"], path],\n task_group=task_group,\n )\n\n task_group.finish()\n",
"path": "pulpcore/app/tasks/importer.py"
}
] | [
{
"content": "import hashlib\nimport json\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport tarfile\nfrom gettext import gettext as _\nfrom logging import getLogger\n\nfrom django.core.files.storage import default_storage\nfrom django.db.models import F\n\nfrom pkg_resources import DistributionNotFound, get_distribution\nfrom rest_framework.serializers import ValidationError\nfrom tablib import Dataset\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models import (\n Artifact,\n Content,\n CreatedResource,\n GroupProgressReport,\n ProgressReport,\n PulpImport,\n PulpImporter,\n Repository,\n Task,\n TaskGroup,\n)\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n)\nfrom pulpcore.constants import TASK_STATES\nfrom pulpcore.tasking.tasks import dispatch\n\nlog = getLogger(__name__)\n\nARTIFACT_FILE = \"pulpcore.app.modelresource.ArtifactResource.json\"\nREPO_FILE = \"pulpcore.app.modelresource.RepositoryResource.json\"\nCONTENT_FILE = \"pulpcore.app.modelresource.ContentResource.json\"\nCA_FILE = \"pulpcore.app.modelresource.ContentArtifactResource.json\"\nVERSIONS_FILE = \"versions.json\"\nCONTENT_MAPPING_FILE = \"content_mapping.json\"\n\n\ndef _destination_repo(importer, source_repo_name):\n \"\"\"Find the destination repository based on source repo's name.\"\"\"\n if importer.repo_mapping and importer.repo_mapping.get(source_repo_name):\n dest_repo_name = importer.repo_mapping[source_repo_name]\n else:\n dest_repo_name = source_repo_name\n return Repository.objects.get(name=dest_repo_name)\n\n\ndef _import_file(fpath, resource_class, do_raise=True):\n try:\n log.info(_(\"Importing file {}.\").format(fpath))\n with open(fpath, \"r\") as json_file:\n data = Dataset().load(json_file, format=\"json\")\n resource = resource_class()\n log.info(_(\"...Importing resource {}.\").format(resource.__class__.__name__))\n return resource.import_data(data, raise_errors=do_raise)\n except AttributeError:\n log.error(_(\"FAILURE importing file {}!\").format(fpath))\n raise\n\n\ndef _check_versions(version_json):\n \"\"\"Compare the export version_json to the installed components.\"\"\"\n error_messages = []\n for component in version_json:\n try:\n version = get_distribution(component[\"component\"]).version\n except DistributionNotFound:\n error_messages.append(\n _(\"Export uses {} which is not installed.\").format(component[\"component\"])\n )\n else:\n if version != component[\"version\"]:\n error_messages.append(\n _(\n \"Export version {export_ver} of {component} does not match \"\n \"installed version {ver}.\"\n ).format(\n export_ver=component[\"version\"],\n component=component[\"component\"],\n ver=version,\n )\n )\n\n if error_messages:\n raise ValidationError((\" \".join(error_messages)))\n\n\ndef import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):\n \"\"\"\n Import a repository version from a Pulp export.\n\n Args:\n importer_pk (str): Importer we are working with\n destination_repo_pk (str): Primary key of Repository to import into.\n source_repo_name (str): Name of the Repository in the export.\n tar_path (str): A path to export tar.\n \"\"\"\n dest_repo = Repository.objects.get(pk=destination_repo_pk)\n importer = PulpImporter.objects.get(pk=importer_pk)\n\n pb = ProgressReport(\n message=f\"Importing content for {dest_repo.name}\",\n code=\"import.repo.version.content\",\n state=TASK_STATES.RUNNING,\n )\n pb.save()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n # 
Extract the repo file for the repo info\n with tarfile.open(tar_path, \"r:gz\") as tar:\n tar.extract(REPO_FILE, path=temp_dir)\n\n with open(os.path.join(temp_dir, REPO_FILE), \"r\") as repo_data_file:\n data = json.load(repo_data_file)\n\n src_repo = next(repo for repo in data if repo[\"name\"] == source_repo_name)\n\n if dest_repo.pulp_type != src_repo[\"pulp_type\"]:\n raise ValidationError(\n _(\n \"Repository type mismatch: {src_repo} ({src_type}) vs {dest_repo} \"\n \"({dest_type}).\"\n ).format(\n src_repo=src_repo[\"name\"],\n src_type=src_repo[\"pulp_type\"],\n dest_repo=dest_repo.name,\n dest_type=dest_repo.pulp_type,\n )\n )\n\n rv_name = \"\"\n # Extract the repo version files\n with tarfile.open(tar_path, \"r:gz\") as tar:\n for mem in tar.getmembers():\n match = re.search(rf\"(^repository-{source_repo_name}_[0-9]+)/.+\", mem.name)\n if match:\n rv_name = match.group(1)\n tar.extract(mem, path=temp_dir)\n\n if not rv_name:\n raise ValidationError(_(\"No RepositoryVersion found for {}\").format(rv_name))\n\n rv_path = os.path.join(temp_dir, rv_name)\n # Content\n plugin_name = src_repo[\"pulp_type\"].split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n\n resulting_content_ids = []\n for res_class in cfg.exportable_classes:\n filename = f\"{res_class.__module__}.{res_class.__name__}.json\"\n a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)\n # django import-export can have a problem with concurrent-imports that are\n # importing the same 'thing' (e.g., a Package that exists in two different\n # repo-versions that are being imported at the same time). We will try an import\n # that will simply record errors as they happen (rather than failing with an exception)\n # first. If errors happen, we'll do one retry before we give up on this repo-version's\n # import.\n if a_result.has_errors():\n log.info(\n _(\"...{} import-errors encountered importing {} from {}, retrying\").format(\n a_result.totals[\"error\"], filename, rv_name\n )\n )\n # Second attempt, we allow to raise an exception on any problem.\n # This will either succeed, or log a fatal error and fail.\n try:\n a_result = _import_file(os.path.join(rv_path, filename), res_class)\n except Exception as e: # noqa log on ANY exception and then re-raise\n log.error(\n _(\"FATAL import-failure importing {} from {}\").format(filename, rv_name)\n )\n raise\n\n resulting_content_ids.extend(\n row.object_id for row in a_result.rows if row.import_type in (\"new\", \"update\")\n )\n\n # Once all content exists, create the ContentArtifact links\n ca_path = os.path.join(rv_path, CA_FILE)\n _import_file(ca_path, ContentArtifactResource)\n\n # see if we have a content mapping\n mapping_path = f\"{rv_name}/{CONTENT_MAPPING_FILE}\"\n mapping = {}\n with tarfile.open(tar_path, \"r:gz\") as tar:\n if mapping_path in tar.getnames():\n tar.extract(mapping_path, path=temp_dir)\n with open(os.path.join(temp_dir, mapping_path), \"r\") as mapping_file:\n mapping = json.load(mapping_file)\n\n if mapping:\n # use the content mapping to map content to repos\n for repo_name, content_ids in mapping.items():\n repo = _destination_repo(importer, repo_name)\n content = Content.objects.filter(upstream_id__in=content_ids)\n with repo.new_version() as new_version:\n new_version.set_content(content)\n else:\n # just map all the content to our destination repo\n content = Content.objects.filter(pk__in=resulting_content_ids)\n with dest_repo.new_version() as new_version:\n new_version.set_content(content)\n\n content_count = 
content.count()\n pb.total = content_count\n pb.done = content_count\n pb.state = TASK_STATES.COMPLETED\n pb.save()\n\n gpr = TaskGroup.current().group_progress_reports.filter(code=\"import.repo.versions\")\n gpr.update(done=F(\"done\") + 1)\n\n\ndef pulp_import(importer_pk, path, toc):\n \"\"\"\n Import a Pulp export into Pulp.\n\n Args:\n importer_pk (str): Primary key of PulpImporter to do the import\n path (str): Path to the export to be imported\n \"\"\"\n\n def _compute_hash(filename):\n sha256_hash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n # Read and update hash string value in blocks of 4K\n for byte_block in iter(lambda: f.read(4096), b\"\"):\n sha256_hash.update(byte_block)\n return sha256_hash.hexdigest()\n\n def validate_toc(toc_filename):\n \"\"\"\n Check validity of table-of-contents file.\n\n table-of-contents must:\n * exist\n * be valid JSON\n * point to chunked-export-files that exist 'next to' the 'toc' file\n * point to chunks whose checksums match the checksums stored in the 'toc' file\n\n Args:\n toc_filename (str): The user-provided toc-file-path to be validated.\n\n Raises:\n ValidationError: If toc is not a valid JSON table-of-contents file,\n or when toc points to chunked-export-files that can't be found in the same\n directory as the toc-file, or the checksums of the chunks do not match the\n checksums stored in toc.\n \"\"\"\n with open(toc_filename) as json_file:\n # Valid JSON?\n the_toc = json.load(json_file)\n if not the_toc.get(\"files\", None) or not the_toc.get(\"meta\", None):\n raise ValidationError(_(\"Missing 'files' or 'meta' keys in table-of-contents!\"))\n\n base_dir = os.path.dirname(toc_filename)\n # Points at chunks that exist?\n missing_files = []\n for f in sorted(the_toc[\"files\"].keys()):\n if not os.path.isfile(os.path.join(base_dir, f)):\n missing_files.append(f)\n if missing_files:\n raise ValidationError(\n _(\n \"Missing import-chunks named in table-of-contents: {}.\".format(\n str(missing_files)\n )\n )\n )\n\n errs = []\n # validate the sha256 of the toc-entries\n # gather errors for reporting at the end\n chunks = sorted(the_toc[\"files\"].keys())\n data = dict(message=\"Validating Chunks\", code=\"validate.chunks\", total=len(chunks))\n with ProgressReport(**data) as pb:\n for chunk in pb.iter(chunks):\n a_hash = _compute_hash(os.path.join(base_dir, chunk))\n if not a_hash == the_toc[\"files\"][chunk]:\n err_str = \"File {} expected checksum : {}, computed checksum : {}\".format(\n chunk, the_toc[\"files\"][chunk], a_hash\n )\n errs.append(err_str)\n\n # if there are any errors, report and fail\n if errs:\n raise ValidationError(_(\"Import chunk hash mismatch: {}).\").format(str(errs)))\n\n return the_toc\n\n def validate_and_assemble(toc_filename):\n \"\"\"Validate checksums of, and reassemble, chunks in table-of-contents file.\"\"\"\n the_toc = validate_toc(toc_filename)\n toc_dir = os.path.dirname(toc_filename)\n result_file = os.path.join(toc_dir, the_toc[\"meta\"][\"file\"])\n\n # if we have only one entry in \"files\", it must be the full .tar.gz - return it\n if len(the_toc[\"files\"]) == 1:\n return os.path.join(toc_dir, list(the_toc[\"files\"].keys())[0])\n\n # We have multiple chunks.\n # reassemble into one file 'next to' the toc and return the resulting full-path\n chunk_size = int(the_toc[\"meta\"][\"chunk_size\"])\n offset = 0\n block_size = 1024\n blocks_per_chunk = int(chunk_size / block_size)\n\n # sorting-by-filename is REALLY IMPORTANT here\n # keys are of the form 
<base-export-name>.00..<base-export-name>.NN,\n # and must be reassembled IN ORDER\n the_chunk_files = sorted(the_toc[\"files\"].keys())\n\n data = dict(\n message=\"Recombining Chunks\", code=\"recombine.chunks\", total=len(the_chunk_files)\n )\n with ProgressReport(**data) as pb:\n for chunk in pb.iter(the_chunk_files):\n # For each chunk, add it to the reconstituted tar.gz, picking up where the previous\n # chunk left off\n subprocess.run(\n [\n \"dd\",\n \"if={}\".format(os.path.join(toc_dir, chunk)),\n \"of={}\".format(result_file),\n \"bs={}\".format(str(block_size)),\n \"seek={}\".format(str(offset)),\n ],\n )\n offset += blocks_per_chunk\n # To keep from taking up All The Disk, we delete each chunk after it has been added\n # to the recombined file.\n try:\n subprocess.run([\"rm\", \"-f\", os.path.join(toc_dir, chunk)])\n except OSError:\n log.warning(\n _(\"Failed to remove chunk {} after recombining. Continuing.\").format(\n os.path.join(toc_dir, chunk)\n ),\n exc_info=True,\n )\n\n combined_hash = _compute_hash(result_file)\n if combined_hash != the_toc[\"meta\"][\"global_hash\"]:\n raise ValidationError(\n _(\"Mismatch between combined .tar.gz checksum [{}] and originating [{}]).\").format(\n combined_hash, the_toc[\"meta\"][\"global_hash\"]\n )\n )\n # if we get this far, then: the chunk-files all existed, they all pass checksum validation,\n # and there exists a combined .tar.gz, which *also* passes checksum-validation.\n # Let the rest of the import process do its thing on the new combined-file.\n return result_file\n\n if toc:\n log.info(_(\"Validating TOC {}.\").format(toc))\n path = validate_and_assemble(toc)\n\n log.info(_(\"Importing {}.\").format(path))\n current_task = Task.current()\n task_group = TaskGroup.current()\n importer = PulpImporter.objects.get(pk=importer_pk)\n the_import = PulpImport.objects.create(\n importer=importer, task=current_task, params={\"path\": path}\n )\n CreatedResource.objects.create(content_object=the_import)\n\n with tempfile.TemporaryDirectory(dir=\".\") as temp_dir:\n with tarfile.open(path, \"r:gz\") as tar:\n tar.extractall(path=temp_dir)\n\n # Check version info\n with open(os.path.join(temp_dir, VERSIONS_FILE)) as version_file:\n version_json = json.load(version_file)\n _check_versions(version_json)\n\n # Artifacts\n ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)\n data = dict(\n message=\"Importing Artifacts\", code=\"import.artifacts\", total=len(ar_result.rows)\n )\n with ProgressReport(**data) as pb:\n for row in pb.iter(ar_result.rows):\n artifact = Artifact.objects.get(pk=row.object_id)\n base_path = os.path.join(\"artifact\", artifact.sha256[0:2], artifact.sha256[2:])\n src = os.path.join(temp_dir, base_path)\n\n if not default_storage.exists(base_path):\n with open(src, \"rb\") as f:\n default_storage.save(base_path, f)\n\n with open(os.path.join(temp_dir, REPO_FILE), \"r\") as repo_data_file:\n data = json.load(repo_data_file)\n gpr = GroupProgressReport(\n message=\"Importing repository versions\",\n code=\"import.repo.versions\",\n total=len(data),\n done=0,\n task_group=task_group,\n )\n gpr.save()\n\n for src_repo in data:\n try:\n dest_repo = _destination_repo(importer, src_repo[\"name\"])\n except Repository.DoesNotExist:\n log.warning(\n _(\"Could not find destination repo for {}. 
Skipping.\").format(\n src_repo[\"name\"]\n )\n )\n continue\n\n dispatch(\n import_repository_version,\n exclusive_resources=[dest_repo],\n args=[importer.pk, dest_repo.pk, src_repo[\"name\"], path],\n task_group=task_group,\n )\n\n task_group.finish()\n",
"path": "pulpcore/app/tasks/importer.py"
}
] | diff --git a/CHANGES/2247.bugfix b/CHANGES/2247.bugfix
new file mode 100644
index 0000000000..851f66d2af
--- /dev/null
+++ b/CHANGES/2247.bugfix
@@ -0,0 +1,4 @@
+PulpImporter now unpacks into the task-worker's working directory rather than /tmp. Unpacking
+large files into /tmp could cause the operation to fail, or even cause stability issues for
+Pulp instance, due to running /tmp out of space.
+
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
index 173e8b4e41..1974e7f73a 100644
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -373,7 +373,7 @@ def validate_and_assemble(toc_filename):
)
CreatedResource.objects.create(content_object=the_import)
- with tempfile.TemporaryDirectory() as temp_dir:
+ with tempfile.TemporaryDirectory(dir=".") as temp_dir:
with tarfile.open(path, "r:gz") as tar:
tar.extractall(path=temp_dir)
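For readers unfamiliar with the `dir` argument (this note and snippet are an addition, not part of the change): `tempfile.TemporaryDirectory(dir=".")` creates the scratch directory under the current working directory, which for a Pulp task is the worker's working directory, instead of under the system temp location, so large archives no longer fill /tmp. A minimal sketch of the difference:

```python
# Sketch of the behavioural difference; the printed paths are illustrative only.
import os
import tempfile

with tempfile.TemporaryDirectory() as scratch:
    print(scratch)                   # e.g. /tmp/tmpq1w2e3 (system temp location)

with tempfile.TemporaryDirectory(dir=".") as scratch:
    print(os.path.abspath(scratch))  # created under the current working directory
```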
|
pennersr__django-allauth-2978 | 0.46.0 Apple provider test broken
Hi! When trying to package 0.46.0 for Arch Linux, I ran into failing tests that appear to be pyjwt-related:
```
Creating test database for alias 'default'...
System check identified some issues:
WARNINGS:
openid.OpenIDNonce: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.
HINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.
openid.OpenIDStore: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.
HINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.
System check identified 2 issues (0 silenced).
....................................................................................................E.E..................................................................................................................................../build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/
tests.py:87: UserWarning: Cannot test provider evernote, no oauth mock
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
..................................................................................................................................................................................xx.........................................................................................../build/python-django-allauth/src/python-django-a
llauth-0.46.0/allauth/socialaccount/tests.py:87: UserWarning: Cannot test provider trello, no oauth mock
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
./build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/tests.py:55: UserWarning: Cannot test provider trello, no oauth mock
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
......................................................................................
======================================================================
ERROR: test_apple_finish (allauth.socialaccount.providers.apple.tests.AppleTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/tests.py", line 231, in test_apple_finish
resp = self.login(self.get_mocked_response())
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/tests.py", line 210, in login
resp = self.client.get(resp.url)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 742, in get
response = super().get(path, data=data, secure=secure, **extra)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 396, in get
return self.generic('GET', path, secure=secure, **{
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 473, in generic
return self.request(**r)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 719, in request
self.check_exception(response)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 580, in check_exception
raise exc_value
File "/usr/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/oauth2/views.py", line 77, in view
return self.dispatch(request, *args, **kwargs)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/oauth2/views.py", line 134, in dispatch
token = self.adapter.parse_token(access_token)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/views.py", line 92, in parse_token
identity_data = self.get_verified_identity_data(data["id_token"])
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/views.py", line 67, in get_verified_identity_data
identity_data = jwt.decode(
TypeError: decode() got an unexpected keyword argument 'verify'
======================================================================
ERROR: test_login (allauth.socialaccount.providers.apple.tests.AppleTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/django/test/utils.py", line 387, in inner
return func(*args, **kwargs)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/tests.py", line 167, in test_login
resp = self.login(
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/tests.py", line 210, in login
resp = self.client.get(resp.url)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 742, in get
response = super().get(path, data=data, secure=secure, **extra)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 396, in get
return self.generic('GET', path, secure=secure, **{
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 473, in generic
return self.request(**r)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 719, in request
self.check_exception(response)
File "/usr/lib/python3.9/site-packages/django/test/client.py", line 580, in check_exception
raise exc_value
File "/usr/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/oauth2/views.py", line 77, in view
return self.dispatch(request, *args, **kwargs)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/oauth2/views.py", line 134, in dispatch
token = self.adapter.parse_token(access_token)
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/views.py", line 92, in parse_token
identity_data = self.get_verified_identity_data(data["id_token"])
File "/build/python-django-allauth/src/python-django-allauth-0.46.0/allauth/socialaccount/providers/apple/views.py", line 67, in get_verified_identity_data
identity_data = jwt.decode(
TypeError: decode() got an unexpected keyword argument 'verify'
----------------------------------------------------------------------
Ran 593 tests in 8.129s
FAILED (errors=2, expected failures=2)
Destroying test database for alias 'default'...
test_account_refresh_token_saved_next_login (allauth.socialaccount.providers.edx.tests.EdxTests)
test_account_refresh_token_saved_next_login (allauth.socialaccount.providers.edx.tests.EdxTests)
test_account_tokens (allauth.socialaccount.providers.edx.tests.EdxTests)
test_login (allauth.socialaccount.providers.edx.tests.EdxTests)
```
On Arch Linux we currently have:
* pyjwt 2.2.0
* requests 2.26.0
* requests-oauthlib 1.3.0
* openid 3.2.0
* django 3.2.9
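For context (an addition, not part of the original report): PyJWT 2.x removed the `verify` keyword from `jwt.decode()`, which is exactly the `TypeError` in the traceback above; verification is now controlled only through `options`, and `algorithms` must be passed explicitly. A minimal sketch of a 2.x-compatible call, with placeholder values:

```python
# Hedged sketch of a PyJWT 2.x-style decode; key and audience values are placeholders.
import jwt

def decode_apple_id_token(id_token: str, public_key, allowed_auds):
    return jwt.decode(
        id_token,
        public_key,
        algorithms=["RS256"],       # must be given explicitly in PyJWT 2.x
        audience=allowed_auds,
        issuer="https://appleid.apple.com",
        # To disable verification instead, pass options={"verify_signature": False};
        # the old verify=True/False keyword no longer exists.
    )
```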
| [
{
"content": "import json\nimport requests\nfrom datetime import timedelta\n\nfrom django.http import HttpResponseNotAllowed, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.http import urlencode\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport jwt\n\nfrom allauth.socialaccount.adapter import get_adapter\nfrom allauth.socialaccount.models import SocialToken\nfrom allauth.socialaccount.providers.oauth2.client import OAuth2Error\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2Adapter,\n OAuth2CallbackView,\n OAuth2LoginView,\n)\nfrom allauth.utils import build_absolute_uri, get_request_param\n\nfrom .apple_session import add_apple_session, persist_apple_session\nfrom .client import AppleOAuth2Client\nfrom .provider import AppleProvider\n\n\nclass AppleOAuth2Adapter(OAuth2Adapter):\n client_class = AppleOAuth2Client\n provider_id = AppleProvider.id\n access_token_url = \"https://appleid.apple.com/auth/token\"\n authorize_url = \"https://appleid.apple.com/auth/authorize\"\n public_key_url = \"https://appleid.apple.com/auth/keys\"\n\n def _get_apple_public_key(self, kid):\n response = requests.get(self.public_key_url)\n response.raise_for_status()\n try:\n data = response.json()\n except json.JSONDecodeError as e:\n raise OAuth2Error(\"Error retrieving apple public key.\") from e\n\n for d in data[\"keys\"]:\n if d[\"kid\"] == kid:\n return d\n\n def get_public_key(self, id_token):\n \"\"\"\n Get the public key which matches the `kid` in the id_token header.\n \"\"\"\n kid = jwt.get_unverified_header(id_token)[\"kid\"]\n apple_public_key = self._get_apple_public_key(kid=kid)\n\n public_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(apple_public_key))\n return public_key\n\n def get_client_id(self, provider):\n app = get_adapter().get_app(request=None, provider=self.provider_id)\n return [aud.strip() for aud in app.client_id.split(\",\")]\n\n def get_verified_identity_data(self, id_token):\n provider = self.get_provider()\n allowed_auds = self.get_client_id(provider)\n\n try:\n public_key = self.get_public_key(id_token)\n identity_data = jwt.decode(\n id_token,\n public_key,\n algorithms=[\"RS256\"],\n verify=True,\n audience=allowed_auds,\n issuer=\"https://appleid.apple.com\",\n )\n return identity_data\n\n except jwt.PyJWTError as e:\n raise OAuth2Error(\"Invalid id_token\") from e\n\n def parse_token(self, data):\n token = SocialToken(\n token=data[\"access_token\"],\n )\n token.token_secret = data.get(\"refresh_token\", \"\")\n\n expires_in = data.get(self.expires_in_key)\n if expires_in:\n token.expires_at = timezone.now() + timedelta(seconds=int(expires_in))\n\n # `user_data` is a big flat dictionary with the parsed JWT claims\n # access_tokens, and user info from the apple post.\n identity_data = self.get_verified_identity_data(data[\"id_token\"])\n token.user_data = {**data, **identity_data}\n\n return token\n\n def complete_login(self, request, app, token, **kwargs):\n extra_data = token.user_data\n login = self.get_provider().sociallogin_from_response(\n request=request, response=extra_data\n )\n login.state[\"id_token\"] = token.user_data\n\n # We can safely remove the apple login session now\n # Note: The cookie will remain, but it's set to delete on browser close\n add_apple_session(request)\n request.apple_login_session.delete()\n\n return login\n\n def get_user_scope_data(self, request):\n user_scope_data = request.apple_login_session.get(\"user\", \"\")\n try:\n return 
json.loads(user_scope_data)\n except json.JSONDecodeError:\n # We do not care much about user scope data as it maybe blank\n # so return blank dictionary instead\n return {}\n\n def get_access_token_data(self, request, app, client):\n \"\"\"We need to gather the info from the apple specific login\"\"\"\n add_apple_session(request)\n\n # Exchange `code`\n code = get_request_param(request, \"code\")\n access_token_data = client.get_access_token(code)\n\n return {\n **access_token_data,\n **self.get_user_scope_data(request),\n \"id_token\": request.apple_login_session.get(\"id_token\"),\n }\n\n\n@csrf_exempt\ndef apple_post_callback(request, finish_endpoint_name=\"apple_finish_callback\"):\n \"\"\"\n Apple uses a `form_post` response type, which due to\n CORS/Samesite-cookie rules means this request cannot access\n the request since the session cookie is unavailable.\n\n We work around this by storing the apple response in a\n separate, temporary session and redirecting to a more normal\n oauth flow.\n\n args:\n finish_endpoint_name (str): The name of a defined URL, which can be\n overridden in your url configuration if you have more than one\n callback endpoint.\n \"\"\"\n if request.method != \"POST\":\n raise HttpResponseNotAllowed([\"POST\"])\n add_apple_session(request)\n\n # Add regular OAuth2 params to the URL - reduces the overrides required\n keys_to_put_in_url = [\"code\", \"state\", \"error\"]\n url_params = {}\n for key in keys_to_put_in_url:\n value = get_request_param(request, key, \"\")\n if value:\n url_params[key] = value\n\n # Add other params to the apple_login_session\n keys_to_save_to_session = [\"user\", \"id_token\"]\n for key in keys_to_save_to_session:\n request.apple_login_session[key] = get_request_param(request, key, \"\")\n\n url = build_absolute_uri(request, reverse(finish_endpoint_name))\n response = HttpResponseRedirect(\n \"{url}?{query}\".format(url=url, query=urlencode(url_params))\n )\n persist_apple_session(request, response)\n return response\n\n\noauth2_login = OAuth2LoginView.adapter_view(AppleOAuth2Adapter)\noauth2_callback = apple_post_callback\noauth2_finish_login = OAuth2CallbackView.adapter_view(AppleOAuth2Adapter)\n",
"path": "allauth/socialaccount/providers/apple/views.py"
}
] | [
{
"content": "import json\nimport requests\nfrom datetime import timedelta\n\nfrom django.http import HttpResponseNotAllowed, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.http import urlencode\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport jwt\n\nfrom allauth.socialaccount.adapter import get_adapter\nfrom allauth.socialaccount.models import SocialToken\nfrom allauth.socialaccount.providers.oauth2.client import OAuth2Error\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2Adapter,\n OAuth2CallbackView,\n OAuth2LoginView,\n)\nfrom allauth.utils import build_absolute_uri, get_request_param\n\nfrom .apple_session import add_apple_session, persist_apple_session\nfrom .client import AppleOAuth2Client\nfrom .provider import AppleProvider\n\n\nclass AppleOAuth2Adapter(OAuth2Adapter):\n client_class = AppleOAuth2Client\n provider_id = AppleProvider.id\n access_token_url = \"https://appleid.apple.com/auth/token\"\n authorize_url = \"https://appleid.apple.com/auth/authorize\"\n public_key_url = \"https://appleid.apple.com/auth/keys\"\n\n def _get_apple_public_key(self, kid):\n response = requests.get(self.public_key_url)\n response.raise_for_status()\n try:\n data = response.json()\n except json.JSONDecodeError as e:\n raise OAuth2Error(\"Error retrieving apple public key.\") from e\n\n for d in data[\"keys\"]:\n if d[\"kid\"] == kid:\n return d\n\n def get_public_key(self, id_token):\n \"\"\"\n Get the public key which matches the `kid` in the id_token header.\n \"\"\"\n kid = jwt.get_unverified_header(id_token)[\"kid\"]\n apple_public_key = self._get_apple_public_key(kid=kid)\n\n public_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(apple_public_key))\n return public_key\n\n def get_client_id(self, provider):\n app = get_adapter().get_app(request=None, provider=self.provider_id)\n return [aud.strip() for aud in app.client_id.split(\",\")]\n\n def get_verified_identity_data(self, id_token):\n provider = self.get_provider()\n allowed_auds = self.get_client_id(provider)\n\n try:\n public_key = self.get_public_key(id_token)\n identity_data = jwt.decode(\n id_token,\n public_key,\n algorithms=[\"RS256\"],\n audience=allowed_auds,\n issuer=\"https://appleid.apple.com\",\n )\n return identity_data\n\n except jwt.PyJWTError as e:\n raise OAuth2Error(\"Invalid id_token\") from e\n\n def parse_token(self, data):\n token = SocialToken(\n token=data[\"access_token\"],\n )\n token.token_secret = data.get(\"refresh_token\", \"\")\n\n expires_in = data.get(self.expires_in_key)\n if expires_in:\n token.expires_at = timezone.now() + timedelta(seconds=int(expires_in))\n\n # `user_data` is a big flat dictionary with the parsed JWT claims\n # access_tokens, and user info from the apple post.\n identity_data = self.get_verified_identity_data(data[\"id_token\"])\n token.user_data = {**data, **identity_data}\n\n return token\n\n def complete_login(self, request, app, token, **kwargs):\n extra_data = token.user_data\n login = self.get_provider().sociallogin_from_response(\n request=request, response=extra_data\n )\n login.state[\"id_token\"] = token.user_data\n\n # We can safely remove the apple login session now\n # Note: The cookie will remain, but it's set to delete on browser close\n add_apple_session(request)\n request.apple_login_session.delete()\n\n return login\n\n def get_user_scope_data(self, request):\n user_scope_data = request.apple_login_session.get(\"user\", \"\")\n try:\n return 
json.loads(user_scope_data)\n except json.JSONDecodeError:\n # We do not care much about user scope data as it maybe blank\n # so return blank dictionary instead\n return {}\n\n def get_access_token_data(self, request, app, client):\n \"\"\"We need to gather the info from the apple specific login\"\"\"\n add_apple_session(request)\n\n # Exchange `code`\n code = get_request_param(request, \"code\")\n access_token_data = client.get_access_token(code)\n\n return {\n **access_token_data,\n **self.get_user_scope_data(request),\n \"id_token\": request.apple_login_session.get(\"id_token\"),\n }\n\n\n@csrf_exempt\ndef apple_post_callback(request, finish_endpoint_name=\"apple_finish_callback\"):\n \"\"\"\n Apple uses a `form_post` response type, which due to\n CORS/Samesite-cookie rules means this request cannot access\n the request since the session cookie is unavailable.\n\n We work around this by storing the apple response in a\n separate, temporary session and redirecting to a more normal\n oauth flow.\n\n args:\n finish_endpoint_name (str): The name of a defined URL, which can be\n overridden in your url configuration if you have more than one\n callback endpoint.\n \"\"\"\n if request.method != \"POST\":\n raise HttpResponseNotAllowed([\"POST\"])\n add_apple_session(request)\n\n # Add regular OAuth2 params to the URL - reduces the overrides required\n keys_to_put_in_url = [\"code\", \"state\", \"error\"]\n url_params = {}\n for key in keys_to_put_in_url:\n value = get_request_param(request, key, \"\")\n if value:\n url_params[key] = value\n\n # Add other params to the apple_login_session\n keys_to_save_to_session = [\"user\", \"id_token\"]\n for key in keys_to_save_to_session:\n request.apple_login_session[key] = get_request_param(request, key, \"\")\n\n url = build_absolute_uri(request, reverse(finish_endpoint_name))\n response = HttpResponseRedirect(\n \"{url}?{query}\".format(url=url, query=urlencode(url_params))\n )\n persist_apple_session(request, response)\n return response\n\n\noauth2_login = OAuth2LoginView.adapter_view(AppleOAuth2Adapter)\noauth2_callback = apple_post_callback\noauth2_finish_login = OAuth2CallbackView.adapter_view(AppleOAuth2Adapter)\n",
"path": "allauth/socialaccount/providers/apple/views.py"
}
] | diff --git a/allauth/socialaccount/providers/apple/views.py b/allauth/socialaccount/providers/apple/views.py
index 873ba110fc..82a62348ee 100644
--- a/allauth/socialaccount/providers/apple/views.py
+++ b/allauth/socialaccount/providers/apple/views.py
@@ -68,7 +68,6 @@ def get_verified_identity_data(self, id_token):
id_token,
public_key,
algorithms=["RS256"],
- verify=True,
audience=allowed_auds,
issuer="https://appleid.apple.com",
)
|
Textualize__rich-2225 | Missing whitespace in traceback
In tracebacks, the blank line between frames is sometimes missing. Not sure what causes it.
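A minimal reproduction sketch (an assumption about the setup, since the report does not include one): render an exception with a few frames and inspect the spacing between them.

```python
# Hypothetical repro; the helper names are illustrative only.
from rich.console import Console

console = Console()

def inner():
    return 1 / 0

def outer():
    inner()

try:
    outer()
except ZeroDivisionError:
    # Each frame of the rendered traceback should be separated by a blank line;
    # the report is that this separator is sometimes missing between frames.
    console.print_exception()
```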
| [
{
"content": "from __future__ import absolute_import\n\nimport os\nimport platform\nimport sys\nfrom dataclasses import dataclass, field\nfrom traceback import walk_tb\nfrom types import ModuleType, TracebackType\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Type, Union\n\nfrom pygments.lexers import guess_lexer_for_filename\nfrom pygments.token import Comment, Keyword, Name, Number, Operator, String\nfrom pygments.token import Text as TextToken\nfrom pygments.token import Token\nfrom pygments.util import ClassNotFound\n\nfrom . import pretty\nfrom ._loop import loop_last\nfrom .columns import Columns\nfrom .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group\nfrom .constrain import Constrain\nfrom .highlighter import RegexHighlighter, ReprHighlighter\nfrom .panel import Panel\nfrom .scope import render_scope\nfrom .style import Style\nfrom .syntax import Syntax\nfrom .text import Text\nfrom .theme import Theme\n\nWINDOWS = platform.system() == \"Windows\"\n\nLOCALS_MAX_LENGTH = 10\nLOCALS_MAX_STRING = 80\n\n\ndef install(\n *,\n console: Optional[Console] = None,\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:\n \"\"\"Install a rich traceback handler.\n\n Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.\n\n\n Args:\n console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.\n width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.\n extra_lines (int, optional): Extra lines of code. Defaults to 3.\n theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick\n a theme appropriate for the platform.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. 
Defaults to True.\n suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n\n Returns:\n Callable: The previous exception handler that was replaced.\n\n \"\"\"\n traceback_console = Console(file=sys.stderr) if console is None else console\n\n def excepthook(\n type_: Type[BaseException],\n value: BaseException,\n traceback: Optional[TracebackType],\n ) -> None:\n traceback_console.print(\n Traceback.from_exception(\n type_,\n value,\n traceback,\n width=width,\n extra_lines=extra_lines,\n theme=theme,\n word_wrap=word_wrap,\n show_locals=show_locals,\n indent_guides=indent_guides,\n suppress=suppress,\n max_frames=max_frames,\n )\n )\n\n def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover\n tb_data = {} # store information about showtraceback call\n default_showtraceback = ip.showtraceback # keep reference of default traceback\n\n def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:\n \"\"\"wrap the default ip.showtraceback to store info for ip._showtraceback\"\"\"\n nonlocal tb_data\n tb_data = kwargs\n default_showtraceback(*args, **kwargs)\n\n def ipy_display_traceback(\n *args: Any, is_syntax: bool = False, **kwargs: Any\n ) -> None:\n \"\"\"Internally called traceback from ip._showtraceback\"\"\"\n nonlocal tb_data\n exc_tuple = ip._get_exc_info()\n\n # do not display trace on syntax error\n tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]\n\n # determine correct tb_offset\n compiled = tb_data.get(\"running_compiled_code\", False)\n tb_offset = tb_data.get(\"tb_offset\", 1 if compiled else 0)\n # remove ipython internal frames from trace with tb_offset\n for _ in range(tb_offset):\n if tb is None:\n break\n tb = tb.tb_next\n\n excepthook(exc_tuple[0], exc_tuple[1], tb)\n tb_data = {} # clear data upon usage\n\n # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work\n # this is also what the ipython docs recommends to modify when subclassing InteractiveShell\n ip._showtraceback = ipy_display_traceback\n # add wrapper to capture tb_data\n ip.showtraceback = ipy_show_traceback\n ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(\n *args, is_syntax=True, **kwargs\n )\n\n try: # pragma: no cover\n # if within ipython, use customized traceback\n ip = get_ipython() # type: ignore[name-defined]\n ipy_excepthook_closure(ip)\n return sys.excepthook\n except Exception:\n # otherwise use default system hook\n old_excepthook = sys.excepthook\n sys.excepthook = excepthook\n return old_excepthook\n\n\n@dataclass\nclass Frame:\n filename: str\n lineno: int\n name: str\n line: str = \"\"\n locals: Optional[Dict[str, pretty.Node]] = None\n\n\n@dataclass\nclass _SyntaxError:\n offset: int\n filename: str\n line: str\n lineno: int\n msg: str\n\n\n@dataclass\nclass Stack:\n exc_type: str\n exc_value: str\n syntax_error: Optional[_SyntaxError] = None\n is_cause: bool = False\n frames: List[Frame] = field(default_factory=list)\n\n\n@dataclass\nclass Trace:\n stacks: List[Stack]\n\n\nclass PathHighlighter(RegexHighlighter):\n highlights = [r\"(?P<dim>.*/)(?P<bold>.+)\"]\n\n\nclass Traceback:\n \"\"\"A Console renderable that renders a traceback.\n\n Args:\n trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses\n the last exception.\n width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.\n extra_lines (int, optional): Additional lines of code to render. 
Defaults to 3.\n theme (str, optional): Override pygments theme used in traceback.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.\n suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.\n\n \"\"\"\n\n LEXERS = {\n \"\": \"text\",\n \".py\": \"python\",\n \".pxd\": \"cython\",\n \".pyx\": \"cython\",\n \".pxi\": \"pyrex\",\n }\n\n def __init__(\n self,\n trace: Optional[Trace] = None,\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n ):\n if trace is None:\n exc_type, exc_value, traceback = sys.exc_info()\n if exc_type is None or exc_value is None or traceback is None:\n raise ValueError(\n \"Value for 'trace' required if not called in except: block\"\n )\n trace = self.extract(\n exc_type, exc_value, traceback, show_locals=show_locals\n )\n self.trace = trace\n self.width = width\n self.extra_lines = extra_lines\n self.theme = Syntax.get_theme(theme or \"ansi_dark\")\n self.word_wrap = word_wrap\n self.show_locals = show_locals\n self.indent_guides = indent_guides\n self.locals_max_length = locals_max_length\n self.locals_max_string = locals_max_string\n\n self.suppress: Sequence[str] = []\n for suppress_entity in suppress:\n if not isinstance(suppress_entity, str):\n assert (\n suppress_entity.__file__ is not None\n ), f\"{suppress_entity!r} must be a module with '__file__' attribute\"\n path = os.path.dirname(suppress_entity.__file__)\n else:\n path = suppress_entity\n path = os.path.normpath(os.path.abspath(path))\n self.suppress.append(path)\n self.max_frames = max(4, max_frames) if max_frames > 0 else 0\n\n @classmethod\n def from_exception(\n cls,\n exc_type: Type[Any],\n exc_value: BaseException,\n traceback: Optional[TracebackType],\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n ) -> \"Traceback\":\n \"\"\"Create a traceback from exception info\n\n Args:\n exc_type (Type[BaseException]): Exception type.\n exc_value (BaseException): Exception value.\n traceback (TracebackType): Python Traceback object.\n width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.\n extra_lines (int, optional): Additional lines of code to render. Defaults to 3.\n theme (str, optional): Override pygments theme used in traceback.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. 
Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.\n suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.\n\n Returns:\n Traceback: A Traceback instance that may be printed.\n \"\"\"\n rich_traceback = cls.extract(\n exc_type, exc_value, traceback, show_locals=show_locals\n )\n return cls(\n rich_traceback,\n width=width,\n extra_lines=extra_lines,\n theme=theme,\n word_wrap=word_wrap,\n show_locals=show_locals,\n indent_guides=indent_guides,\n locals_max_length=locals_max_length,\n locals_max_string=locals_max_string,\n suppress=suppress,\n max_frames=max_frames,\n )\n\n @classmethod\n def extract(\n cls,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: Optional[TracebackType],\n show_locals: bool = False,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n ) -> Trace:\n \"\"\"Extract traceback information.\n\n Args:\n exc_type (Type[BaseException]): Exception type.\n exc_value (BaseException): Exception value.\n traceback (TracebackType): Python Traceback object.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. 
Defaults to 80.\n\n Returns:\n Trace: A Trace instance which you can use to construct a `Traceback`.\n \"\"\"\n\n stacks: List[Stack] = []\n is_cause = False\n\n from rich import _IMPORT_CWD\n\n def safe_str(_object: Any) -> str:\n \"\"\"Don't allow exceptions from __str__ to propegate.\"\"\"\n try:\n return str(_object)\n except Exception:\n return \"<exception str() failed>\"\n\n while True:\n stack = Stack(\n exc_type=safe_str(exc_type.__name__),\n exc_value=safe_str(exc_value),\n is_cause=is_cause,\n )\n\n if isinstance(exc_value, SyntaxError):\n stack.syntax_error = _SyntaxError(\n offset=exc_value.offset or 0,\n filename=exc_value.filename or \"?\",\n lineno=exc_value.lineno or 0,\n line=exc_value.text or \"\",\n msg=exc_value.msg,\n )\n\n stacks.append(stack)\n append = stack.frames.append\n\n for frame_summary, line_no in walk_tb(traceback):\n filename = frame_summary.f_code.co_filename\n if filename and not filename.startswith(\"<\"):\n if not os.path.isabs(filename):\n filename = os.path.join(_IMPORT_CWD, filename)\n frame = Frame(\n filename=filename or \"?\",\n lineno=line_no,\n name=frame_summary.f_code.co_name,\n locals={\n key: pretty.traverse(\n value,\n max_length=locals_max_length,\n max_string=locals_max_string,\n )\n for key, value in frame_summary.f_locals.items()\n }\n if show_locals\n else None,\n )\n append(frame)\n if \"_rich_traceback_guard\" in frame_summary.f_locals:\n del stack.frames[:]\n\n cause = getattr(exc_value, \"__cause__\", None)\n if cause and cause.__traceback__:\n exc_type = cause.__class__\n exc_value = cause\n traceback = cause.__traceback__\n is_cause = True\n continue\n\n cause = exc_value.__context__\n if (\n cause\n and cause.__traceback__\n and not getattr(exc_value, \"__suppress_context__\", False)\n ):\n exc_type = cause.__class__\n exc_value = cause\n traceback = cause.__traceback__\n is_cause = False\n continue\n # No cover, code is reached but coverage doesn't recognize it.\n break # pragma: no cover\n\n trace = Trace(stacks=stacks)\n return trace\n\n def __rich_console__(\n self, console: Console, options: ConsoleOptions\n ) -> RenderResult:\n theme = self.theme\n background_style = theme.get_background_style()\n token_style = theme.get_style_for_token\n\n traceback_theme = Theme(\n {\n \"pretty\": token_style(TextToken),\n \"pygments.text\": token_style(Token),\n \"pygments.string\": token_style(String),\n \"pygments.function\": token_style(Name.Function),\n \"pygments.number\": token_style(Number),\n \"repr.indent\": token_style(Comment) + Style(dim=True),\n \"repr.str\": token_style(String),\n \"repr.brace\": token_style(TextToken) + Style(bold=True),\n \"repr.number\": token_style(Number),\n \"repr.bool_true\": token_style(Keyword.Constant),\n \"repr.bool_false\": token_style(Keyword.Constant),\n \"repr.none\": token_style(Keyword.Constant),\n \"scope.border\": token_style(String.Delimiter),\n \"scope.equals\": token_style(Operator),\n \"scope.key\": token_style(Name),\n \"scope.key.special\": token_style(Name.Constant) + Style(dim=True),\n },\n inherit=False,\n )\n\n highlighter = ReprHighlighter()\n for last, stack in loop_last(reversed(self.trace.stacks)):\n if stack.frames:\n stack_renderable: ConsoleRenderable = Panel(\n self._render_stack(stack),\n title=\"[traceback.title]Traceback [dim](most recent call last)\",\n style=background_style,\n border_style=\"traceback.border\",\n expand=True,\n padding=(0, 1),\n )\n stack_renderable = Constrain(stack_renderable, self.width)\n with console.use_theme(traceback_theme):\n yield 
stack_renderable\n if stack.syntax_error is not None:\n with console.use_theme(traceback_theme):\n yield Constrain(\n Panel(\n self._render_syntax_error(stack.syntax_error),\n style=background_style,\n border_style=\"traceback.border.syntax_error\",\n expand=True,\n padding=(0, 1),\n width=self.width,\n ),\n self.width,\n )\n yield Text.assemble(\n (f\"{stack.exc_type}: \", \"traceback.exc_type\"),\n highlighter(stack.syntax_error.msg),\n )\n elif stack.exc_value:\n yield Text.assemble(\n (f\"{stack.exc_type}: \", \"traceback.exc_type\"),\n highlighter(stack.exc_value),\n )\n else:\n yield Text.assemble((f\"{stack.exc_type}\", \"traceback.exc_type\"))\n\n if not last:\n if stack.is_cause:\n yield Text.from_markup(\n \"\\n[i]The above exception was the direct cause of the following exception:\\n\",\n )\n else:\n yield Text.from_markup(\n \"\\n[i]During handling of the above exception, another exception occurred:\\n\",\n )\n\n @group()\n def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:\n highlighter = ReprHighlighter()\n path_highlighter = PathHighlighter()\n if syntax_error.filename != \"<stdin>\":\n text = Text.assemble(\n (f\" {syntax_error.filename}\", \"pygments.string\"),\n (\":\", \"pygments.text\"),\n (str(syntax_error.lineno), \"pygments.number\"),\n style=\"pygments.text\",\n )\n yield path_highlighter(text)\n syntax_error_text = highlighter(syntax_error.line.rstrip())\n syntax_error_text.no_wrap = True\n offset = min(syntax_error.offset - 1, len(syntax_error_text))\n syntax_error_text.stylize(\"bold underline\", offset, offset)\n syntax_error_text += Text.from_markup(\n \"\\n\" + \" \" * offset + \"[traceback.offset]▲[/]\",\n style=\"pygments.text\",\n )\n yield syntax_error_text\n\n @classmethod\n def _guess_lexer(cls, filename: str, code: str) -> str:\n ext = os.path.splitext(filename)[-1]\n if not ext:\n # No extension, look at first line to see if it is a hashbang\n # Note, this is an educated guess and not a guarantee\n # If it fails, the only downside is that the code is highlighted strangely\n new_line_index = code.index(\"\\n\")\n first_line = code[:new_line_index] if new_line_index != -1 else code\n if first_line.startswith(\"#!\") and \"python\" in first_line.lower():\n return \"python\"\n try:\n return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name\n except ClassNotFound:\n return \"text\"\n\n @group()\n def _render_stack(self, stack: Stack) -> RenderResult:\n path_highlighter = PathHighlighter()\n theme = self.theme\n code_cache: Dict[str, str] = {}\n\n def read_code(filename: str) -> str:\n \"\"\"Read files, and cache results on filename.\n\n Args:\n filename (str): Filename to read\n\n Returns:\n str: Contents of file\n \"\"\"\n code = code_cache.get(filename)\n if code is None:\n with open(\n filename, \"rt\", encoding=\"utf-8\", errors=\"replace\"\n ) as code_file:\n code = code_file.read()\n code_cache[filename] = code\n return code\n\n def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:\n if frame.locals:\n yield render_scope(\n frame.locals,\n title=\"locals\",\n indent_guides=self.indent_guides,\n max_length=self.locals_max_length,\n max_string=self.locals_max_string,\n )\n\n exclude_frames: Optional[range] = None\n if self.max_frames != 0:\n exclude_frames = range(\n self.max_frames // 2,\n len(stack.frames) - self.max_frames // 2,\n )\n\n excluded = False\n for frame_index, frame in enumerate(stack.frames):\n\n if exclude_frames and frame_index in exclude_frames:\n excluded = True\n continue\n\n if 
excluded:\n assert exclude_frames is not None\n yield Text(\n f\"\\n... {len(exclude_frames)} frames hidden ...\",\n justify=\"center\",\n style=\"traceback.error\",\n )\n excluded = False\n\n first = frame_index == 1\n frame_filename = frame.filename\n suppressed = any(frame_filename.startswith(path) for path in self.suppress)\n\n text = Text.assemble(\n path_highlighter(Text(frame.filename, style=\"pygments.string\")),\n (\":\", \"pygments.text\"),\n (str(frame.lineno), \"pygments.number\"),\n \" in \",\n (frame.name, \"pygments.function\"),\n style=\"pygments.text\",\n )\n if not frame.filename.startswith(\"<\") and not first:\n yield \"\"\n yield text\n if frame.filename.startswith(\"<\"):\n yield from render_locals(frame)\n continue\n if not suppressed:\n try:\n code = read_code(frame.filename)\n lexer_name = self._guess_lexer(frame.filename, code)\n syntax = Syntax(\n code,\n lexer_name,\n theme=theme,\n line_numbers=True,\n line_range=(\n frame.lineno - self.extra_lines,\n frame.lineno + self.extra_lines,\n ),\n highlight_lines={frame.lineno},\n word_wrap=self.word_wrap,\n code_width=88,\n indent_guides=self.indent_guides,\n dedent=False,\n )\n yield \"\"\n except Exception as error:\n yield Text.assemble(\n (f\"\\n{error}\", \"traceback.error\"),\n )\n else:\n yield (\n Columns(\n [\n syntax,\n *render_locals(frame),\n ],\n padding=1,\n )\n if frame.locals\n else syntax\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n from .console import Console\n\n console = Console()\n import sys\n\n def bar(a: Any) -> None: # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑\n one = 1\n print(one / a)\n\n def foo(a: Any) -> None:\n _rich_traceback_guard = True\n zed = {\n \"characters\": {\n \"Paul Atreides\",\n \"Vladimir Harkonnen\",\n \"Thufir Hawat\",\n \"Duncan Idaho\",\n },\n \"atomic_types\": (None, False, True),\n }\n bar(a)\n\n def error() -> None:\n\n try:\n try:\n foo(0)\n except:\n slfkjsldkfj # type: ignore[name-defined]\n except:\n console.print_exception(show_locals=True)\n\n error()\n",
"path": "rich/traceback.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport os\nimport platform\nimport sys\nfrom dataclasses import dataclass, field\nfrom traceback import walk_tb\nfrom types import ModuleType, TracebackType\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Type, Union\n\nfrom pygments.lexers import guess_lexer_for_filename\nfrom pygments.token import Comment, Keyword, Name, Number, Operator, String\nfrom pygments.token import Text as TextToken\nfrom pygments.token import Token\nfrom pygments.util import ClassNotFound\n\nfrom . import pretty\nfrom ._loop import loop_last\nfrom .columns import Columns\nfrom .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group\nfrom .constrain import Constrain\nfrom .highlighter import RegexHighlighter, ReprHighlighter\nfrom .panel import Panel\nfrom .scope import render_scope\nfrom .style import Style\nfrom .syntax import Syntax\nfrom .text import Text\nfrom .theme import Theme\n\nWINDOWS = platform.system() == \"Windows\"\n\nLOCALS_MAX_LENGTH = 10\nLOCALS_MAX_STRING = 80\n\n\ndef install(\n *,\n console: Optional[Console] = None,\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:\n \"\"\"Install a rich traceback handler.\n\n Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.\n\n\n Args:\n console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.\n width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.\n extra_lines (int, optional): Extra lines of code. Defaults to 3.\n theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick\n a theme appropriate for the platform.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. 
Defaults to True.\n suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n\n Returns:\n Callable: The previous exception handler that was replaced.\n\n \"\"\"\n traceback_console = Console(file=sys.stderr) if console is None else console\n\n def excepthook(\n type_: Type[BaseException],\n value: BaseException,\n traceback: Optional[TracebackType],\n ) -> None:\n traceback_console.print(\n Traceback.from_exception(\n type_,\n value,\n traceback,\n width=width,\n extra_lines=extra_lines,\n theme=theme,\n word_wrap=word_wrap,\n show_locals=show_locals,\n indent_guides=indent_guides,\n suppress=suppress,\n max_frames=max_frames,\n )\n )\n\n def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover\n tb_data = {} # store information about showtraceback call\n default_showtraceback = ip.showtraceback # keep reference of default traceback\n\n def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:\n \"\"\"wrap the default ip.showtraceback to store info for ip._showtraceback\"\"\"\n nonlocal tb_data\n tb_data = kwargs\n default_showtraceback(*args, **kwargs)\n\n def ipy_display_traceback(\n *args: Any, is_syntax: bool = False, **kwargs: Any\n ) -> None:\n \"\"\"Internally called traceback from ip._showtraceback\"\"\"\n nonlocal tb_data\n exc_tuple = ip._get_exc_info()\n\n # do not display trace on syntax error\n tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]\n\n # determine correct tb_offset\n compiled = tb_data.get(\"running_compiled_code\", False)\n tb_offset = tb_data.get(\"tb_offset\", 1 if compiled else 0)\n # remove ipython internal frames from trace with tb_offset\n for _ in range(tb_offset):\n if tb is None:\n break\n tb = tb.tb_next\n\n excepthook(exc_tuple[0], exc_tuple[1], tb)\n tb_data = {} # clear data upon usage\n\n # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work\n # this is also what the ipython docs recommends to modify when subclassing InteractiveShell\n ip._showtraceback = ipy_display_traceback\n # add wrapper to capture tb_data\n ip.showtraceback = ipy_show_traceback\n ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(\n *args, is_syntax=True, **kwargs\n )\n\n try: # pragma: no cover\n # if within ipython, use customized traceback\n ip = get_ipython() # type: ignore[name-defined]\n ipy_excepthook_closure(ip)\n return sys.excepthook\n except Exception:\n # otherwise use default system hook\n old_excepthook = sys.excepthook\n sys.excepthook = excepthook\n return old_excepthook\n\n\n@dataclass\nclass Frame:\n filename: str\n lineno: int\n name: str\n line: str = \"\"\n locals: Optional[Dict[str, pretty.Node]] = None\n\n\n@dataclass\nclass _SyntaxError:\n offset: int\n filename: str\n line: str\n lineno: int\n msg: str\n\n\n@dataclass\nclass Stack:\n exc_type: str\n exc_value: str\n syntax_error: Optional[_SyntaxError] = None\n is_cause: bool = False\n frames: List[Frame] = field(default_factory=list)\n\n\n@dataclass\nclass Trace:\n stacks: List[Stack]\n\n\nclass PathHighlighter(RegexHighlighter):\n highlights = [r\"(?P<dim>.*/)(?P<bold>.+)\"]\n\n\nclass Traceback:\n \"\"\"A Console renderable that renders a traceback.\n\n Args:\n trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses\n the last exception.\n width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.\n extra_lines (int, optional): Additional lines of code to render. 
Defaults to 3.\n theme (str, optional): Override pygments theme used in traceback.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.\n suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.\n\n \"\"\"\n\n LEXERS = {\n \"\": \"text\",\n \".py\": \"python\",\n \".pxd\": \"cython\",\n \".pyx\": \"cython\",\n \".pxi\": \"pyrex\",\n }\n\n def __init__(\n self,\n trace: Optional[Trace] = None,\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n ):\n if trace is None:\n exc_type, exc_value, traceback = sys.exc_info()\n if exc_type is None or exc_value is None or traceback is None:\n raise ValueError(\n \"Value for 'trace' required if not called in except: block\"\n )\n trace = self.extract(\n exc_type, exc_value, traceback, show_locals=show_locals\n )\n self.trace = trace\n self.width = width\n self.extra_lines = extra_lines\n self.theme = Syntax.get_theme(theme or \"ansi_dark\")\n self.word_wrap = word_wrap\n self.show_locals = show_locals\n self.indent_guides = indent_guides\n self.locals_max_length = locals_max_length\n self.locals_max_string = locals_max_string\n\n self.suppress: Sequence[str] = []\n for suppress_entity in suppress:\n if not isinstance(suppress_entity, str):\n assert (\n suppress_entity.__file__ is not None\n ), f\"{suppress_entity!r} must be a module with '__file__' attribute\"\n path = os.path.dirname(suppress_entity.__file__)\n else:\n path = suppress_entity\n path = os.path.normpath(os.path.abspath(path))\n self.suppress.append(path)\n self.max_frames = max(4, max_frames) if max_frames > 0 else 0\n\n @classmethod\n def from_exception(\n cls,\n exc_type: Type[Any],\n exc_value: BaseException,\n traceback: Optional[TracebackType],\n width: Optional[int] = 100,\n extra_lines: int = 3,\n theme: Optional[str] = None,\n word_wrap: bool = False,\n show_locals: bool = False,\n indent_guides: bool = True,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n suppress: Iterable[Union[str, ModuleType]] = (),\n max_frames: int = 100,\n ) -> \"Traceback\":\n \"\"\"Create a traceback from exception info\n\n Args:\n exc_type (Type[BaseException]): Exception type.\n exc_value (BaseException): Exception value.\n traceback (TracebackType): Python Traceback object.\n width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.\n extra_lines (int, optional): Additional lines of code to render. Defaults to 3.\n theme (str, optional): Override pygments theme used in traceback.\n word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.\n show_locals (bool, optional): Enable display of local variables. 
Defaults to False.\n indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.\n suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.\n\n Returns:\n Traceback: A Traceback instance that may be printed.\n \"\"\"\n rich_traceback = cls.extract(\n exc_type, exc_value, traceback, show_locals=show_locals\n )\n return cls(\n rich_traceback,\n width=width,\n extra_lines=extra_lines,\n theme=theme,\n word_wrap=word_wrap,\n show_locals=show_locals,\n indent_guides=indent_guides,\n locals_max_length=locals_max_length,\n locals_max_string=locals_max_string,\n suppress=suppress,\n max_frames=max_frames,\n )\n\n @classmethod\n def extract(\n cls,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: Optional[TracebackType],\n show_locals: bool = False,\n locals_max_length: int = LOCALS_MAX_LENGTH,\n locals_max_string: int = LOCALS_MAX_STRING,\n ) -> Trace:\n \"\"\"Extract traceback information.\n\n Args:\n exc_type (Type[BaseException]): Exception type.\n exc_value (BaseException): Exception value.\n traceback (TracebackType): Python Traceback object.\n show_locals (bool, optional): Enable display of local variables. Defaults to False.\n locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to 10.\n locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. 
Defaults to 80.\n\n Returns:\n Trace: A Trace instance which you can use to construct a `Traceback`.\n \"\"\"\n\n stacks: List[Stack] = []\n is_cause = False\n\n from rich import _IMPORT_CWD\n\n def safe_str(_object: Any) -> str:\n \"\"\"Don't allow exceptions from __str__ to propegate.\"\"\"\n try:\n return str(_object)\n except Exception:\n return \"<exception str() failed>\"\n\n while True:\n stack = Stack(\n exc_type=safe_str(exc_type.__name__),\n exc_value=safe_str(exc_value),\n is_cause=is_cause,\n )\n\n if isinstance(exc_value, SyntaxError):\n stack.syntax_error = _SyntaxError(\n offset=exc_value.offset or 0,\n filename=exc_value.filename or \"?\",\n lineno=exc_value.lineno or 0,\n line=exc_value.text or \"\",\n msg=exc_value.msg,\n )\n\n stacks.append(stack)\n append = stack.frames.append\n\n for frame_summary, line_no in walk_tb(traceback):\n filename = frame_summary.f_code.co_filename\n if filename and not filename.startswith(\"<\"):\n if not os.path.isabs(filename):\n filename = os.path.join(_IMPORT_CWD, filename)\n frame = Frame(\n filename=filename or \"?\",\n lineno=line_no,\n name=frame_summary.f_code.co_name,\n locals={\n key: pretty.traverse(\n value,\n max_length=locals_max_length,\n max_string=locals_max_string,\n )\n for key, value in frame_summary.f_locals.items()\n }\n if show_locals\n else None,\n )\n append(frame)\n if \"_rich_traceback_guard\" in frame_summary.f_locals:\n del stack.frames[:]\n\n cause = getattr(exc_value, \"__cause__\", None)\n if cause and cause.__traceback__:\n exc_type = cause.__class__\n exc_value = cause\n traceback = cause.__traceback__\n is_cause = True\n continue\n\n cause = exc_value.__context__\n if (\n cause\n and cause.__traceback__\n and not getattr(exc_value, \"__suppress_context__\", False)\n ):\n exc_type = cause.__class__\n exc_value = cause\n traceback = cause.__traceback__\n is_cause = False\n continue\n # No cover, code is reached but coverage doesn't recognize it.\n break # pragma: no cover\n\n trace = Trace(stacks=stacks)\n return trace\n\n def __rich_console__(\n self, console: Console, options: ConsoleOptions\n ) -> RenderResult:\n theme = self.theme\n background_style = theme.get_background_style()\n token_style = theme.get_style_for_token\n\n traceback_theme = Theme(\n {\n \"pretty\": token_style(TextToken),\n \"pygments.text\": token_style(Token),\n \"pygments.string\": token_style(String),\n \"pygments.function\": token_style(Name.Function),\n \"pygments.number\": token_style(Number),\n \"repr.indent\": token_style(Comment) + Style(dim=True),\n \"repr.str\": token_style(String),\n \"repr.brace\": token_style(TextToken) + Style(bold=True),\n \"repr.number\": token_style(Number),\n \"repr.bool_true\": token_style(Keyword.Constant),\n \"repr.bool_false\": token_style(Keyword.Constant),\n \"repr.none\": token_style(Keyword.Constant),\n \"scope.border\": token_style(String.Delimiter),\n \"scope.equals\": token_style(Operator),\n \"scope.key\": token_style(Name),\n \"scope.key.special\": token_style(Name.Constant) + Style(dim=True),\n },\n inherit=False,\n )\n\n highlighter = ReprHighlighter()\n for last, stack in loop_last(reversed(self.trace.stacks)):\n if stack.frames:\n stack_renderable: ConsoleRenderable = Panel(\n self._render_stack(stack),\n title=\"[traceback.title]Traceback [dim](most recent call last)\",\n style=background_style,\n border_style=\"traceback.border\",\n expand=True,\n padding=(0, 1),\n )\n stack_renderable = Constrain(stack_renderable, self.width)\n with console.use_theme(traceback_theme):\n yield 
stack_renderable\n if stack.syntax_error is not None:\n with console.use_theme(traceback_theme):\n yield Constrain(\n Panel(\n self._render_syntax_error(stack.syntax_error),\n style=background_style,\n border_style=\"traceback.border.syntax_error\",\n expand=True,\n padding=(0, 1),\n width=self.width,\n ),\n self.width,\n )\n yield Text.assemble(\n (f\"{stack.exc_type}: \", \"traceback.exc_type\"),\n highlighter(stack.syntax_error.msg),\n )\n elif stack.exc_value:\n yield Text.assemble(\n (f\"{stack.exc_type}: \", \"traceback.exc_type\"),\n highlighter(stack.exc_value),\n )\n else:\n yield Text.assemble((f\"{stack.exc_type}\", \"traceback.exc_type\"))\n\n if not last:\n if stack.is_cause:\n yield Text.from_markup(\n \"\\n[i]The above exception was the direct cause of the following exception:\\n\",\n )\n else:\n yield Text.from_markup(\n \"\\n[i]During handling of the above exception, another exception occurred:\\n\",\n )\n\n @group()\n def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:\n highlighter = ReprHighlighter()\n path_highlighter = PathHighlighter()\n if syntax_error.filename != \"<stdin>\":\n text = Text.assemble(\n (f\" {syntax_error.filename}\", \"pygments.string\"),\n (\":\", \"pygments.text\"),\n (str(syntax_error.lineno), \"pygments.number\"),\n style=\"pygments.text\",\n )\n yield path_highlighter(text)\n syntax_error_text = highlighter(syntax_error.line.rstrip())\n syntax_error_text.no_wrap = True\n offset = min(syntax_error.offset - 1, len(syntax_error_text))\n syntax_error_text.stylize(\"bold underline\", offset, offset)\n syntax_error_text += Text.from_markup(\n \"\\n\" + \" \" * offset + \"[traceback.offset]▲[/]\",\n style=\"pygments.text\",\n )\n yield syntax_error_text\n\n @classmethod\n def _guess_lexer(cls, filename: str, code: str) -> str:\n ext = os.path.splitext(filename)[-1]\n if not ext:\n # No extension, look at first line to see if it is a hashbang\n # Note, this is an educated guess and not a guarantee\n # If it fails, the only downside is that the code is highlighted strangely\n new_line_index = code.index(\"\\n\")\n first_line = code[:new_line_index] if new_line_index != -1 else code\n if first_line.startswith(\"#!\") and \"python\" in first_line.lower():\n return \"python\"\n try:\n return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name\n except ClassNotFound:\n return \"text\"\n\n @group()\n def _render_stack(self, stack: Stack) -> RenderResult:\n path_highlighter = PathHighlighter()\n theme = self.theme\n code_cache: Dict[str, str] = {}\n\n def read_code(filename: str) -> str:\n \"\"\"Read files, and cache results on filename.\n\n Args:\n filename (str): Filename to read\n\n Returns:\n str: Contents of file\n \"\"\"\n code = code_cache.get(filename)\n if code is None:\n with open(\n filename, \"rt\", encoding=\"utf-8\", errors=\"replace\"\n ) as code_file:\n code = code_file.read()\n code_cache[filename] = code\n return code\n\n def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:\n if frame.locals:\n yield render_scope(\n frame.locals,\n title=\"locals\",\n indent_guides=self.indent_guides,\n max_length=self.locals_max_length,\n max_string=self.locals_max_string,\n )\n\n exclude_frames: Optional[range] = None\n if self.max_frames != 0:\n exclude_frames = range(\n self.max_frames // 2,\n len(stack.frames) - self.max_frames // 2,\n )\n\n excluded = False\n for frame_index, frame in enumerate(stack.frames):\n\n if exclude_frames and frame_index in exclude_frames:\n excluded = True\n continue\n\n if 
excluded:\n assert exclude_frames is not None\n yield Text(\n f\"\\n... {len(exclude_frames)} frames hidden ...\",\n justify=\"center\",\n style=\"traceback.error\",\n )\n excluded = False\n\n first = frame_index == 0\n frame_filename = frame.filename\n suppressed = any(frame_filename.startswith(path) for path in self.suppress)\n\n text = Text.assemble(\n path_highlighter(Text(frame.filename, style=\"pygments.string\")),\n (\":\", \"pygments.text\"),\n (str(frame.lineno), \"pygments.number\"),\n \" in \",\n (frame.name, \"pygments.function\"),\n style=\"pygments.text\",\n )\n if not frame.filename.startswith(\"<\") and not first:\n yield \"\"\n yield text\n if frame.filename.startswith(\"<\"):\n yield from render_locals(frame)\n continue\n if not suppressed:\n try:\n code = read_code(frame.filename)\n lexer_name = self._guess_lexer(frame.filename, code)\n syntax = Syntax(\n code,\n lexer_name,\n theme=theme,\n line_numbers=True,\n line_range=(\n frame.lineno - self.extra_lines,\n frame.lineno + self.extra_lines,\n ),\n highlight_lines={frame.lineno},\n word_wrap=self.word_wrap,\n code_width=88,\n indent_guides=self.indent_guides,\n dedent=False,\n )\n yield \"\"\n except Exception as error:\n yield Text.assemble(\n (f\"\\n{error}\", \"traceback.error\"),\n )\n else:\n yield (\n Columns(\n [\n syntax,\n *render_locals(frame),\n ],\n padding=1,\n )\n if frame.locals\n else syntax\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n from .console import Console\n\n console = Console()\n import sys\n\n def bar(a: Any) -> None: # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑\n one = 1\n print(one / a)\n\n def foo(a: Any) -> None:\n _rich_traceback_guard = True\n zed = {\n \"characters\": {\n \"Paul Atreides\",\n \"Vladimir Harkonnen\",\n \"Thufir Hawat\",\n \"Duncan Idaho\",\n },\n \"atomic_types\": (None, False, True),\n }\n bar(a)\n\n def error() -> None:\n\n try:\n try:\n foo(0)\n except:\n slfkjsldkfj # type: ignore[name-defined]\n except:\n console.print_exception(show_locals=True)\n\n error()\n",
"path": "rich/traceback.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 638ede8d3..5001a075c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed recursion error in Jupyter progress bars https://github.com/Textualize/rich/issues/2047
- Complex numbers are now identified by the highlighter https://github.com/Textualize/rich/issues/2214
- Fix crash on IDLE and forced is_terminal detection to False because IDLE can't do escape codes https://github.com/Textualize/rich/issues/2222
+- Fixed missing blank line in traceback rendering https://github.com/Textualize/rich/issues/2206
### Changed
diff --git a/rich/traceback.py b/rich/traceback.py
index 8f092c661..5feefb93b 100644
--- a/rich/traceback.py
+++ b/rich/traceback.py
@@ -584,7 +584,7 @@ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
)
excluded = False
- first = frame_index == 1
+ first = frame_index == 0
frame_filename = frame.filename
suppressed = any(frame_filename.startswith(path) for path in self.suppress)
diff --git a/tests/test_traceback.py b/tests/test_traceback.py
index e6b4de3f1..c4994c87b 100644
--- a/tests/test_traceback.py
+++ b/tests/test_traceback.py
@@ -1,4 +1,5 @@
import io
+import re
import sys
import pytest
@@ -21,10 +22,17 @@
def test_handler():
console = Console(file=io.StringIO(), width=100, color_system=None)
expected_old_handler = sys.excepthook
+
+ def level1():
+ level2()
+
+ def level2():
+ return 1 / 0
+
try:
old_handler = install(console=console)
try:
- 1 / 0
+ level1()
except Exception:
exc_type, exc_value, traceback = sys.exc_info()
sys.excepthook(exc_type, exc_value, traceback)
@@ -32,6 +40,30 @@ def test_handler():
print(repr(rendered_exception))
assert "Traceback" in rendered_exception
assert "ZeroDivisionError" in rendered_exception
+
+ frame_blank_line_possible_preambles = (
+ # Start of the stack rendering:
+ "╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮",
+ # Each subsequent frame (starting with the file name) should then be preceded with a blank line:
+ "│" + (" " * 98) + "│",
+ )
+ for frame_start in re.finditer(
+ "^│ .+rich/tests/test_traceback\.py:",
+ rendered_exception,
+ flags=re.MULTILINE,
+ ):
+ frame_start_index = frame_start.start()
+ for preamble in frame_blank_line_possible_preambles:
+ preamble_start, preamble_end = (
+ frame_start_index - len(preamble) - 1,
+ frame_start_index - 1,
+ )
+ if rendered_exception[preamble_start:preamble_end] == preamble:
+ break
+ else:
+ pytest.fail(
+ f"Frame {frame_start[0]} doesn't have the expected preamble"
+ )
finally:
sys.excepthook = old_handler
assert old_handler == expected_old_handler
|
netbox-community__netbox-1517 | No field to edit Cluster comments
### Issue type
[ ] Feature request <!-- Requesting the implementation of a new feature -->
[X] Bug report <!-- Reporting unexpected or erroneous behavior -->
[ ] Documentation <!-- Proposing a modification to the documentation -->
### Environment
* Python version: 3.5.2
* NetBox version: origin/dev-2.2 e93129f1
### Description
When you view Clusters you can see there is a 'comments' field. However, when you create a Cluster there is no field to enter comments. The same applies when you click Edit on an existing cluster (e.g. `/virtualization/clusters/1/edit/`).
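A minimal sketch of the kind of change that would expose the field, modelled on how `ClusterBulkEditForm` and `ClusterCSVForm` already handle `comments` (the exact field and widget choices are assumptions, not the shipped patch):
```python
# Hypothetical sketch: declare a comments field on ClusterForm, mirroring
# the CommentField/SmallTextarea combination the bulk-edit form already uses.
from extras.forms import CustomFieldForm
from utilities.forms import BootstrapMixin, CommentField, SmallTextarea

from .models import Cluster


class ClusterForm(BootstrapMixin, CustomFieldForm):
    comments = CommentField(widget=SmallTextarea)  # assumed widget choice

    class Meta:
        model = Cluster
        fields = ['name', 'type', 'group', 'comments']
```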
| [
{
"content": "from __future__ import unicode_literals\n\nfrom mptt.forms import TreeNodeChoiceField\n\nfrom django import forms\nfrom django.db.models import Count\n\nfrom dcim.constants import IFACE_FF_VIRTUAL, VIFACE_FF_CHOICES\nfrom dcim.formfields import MACAddressFormField\nfrom dcim.models import Device, Interface, Platform, Rack, Region, Site\nfrom extras.forms import CustomFieldBulkEditForm, CustomFieldForm, CustomFieldFilterForm\nfrom tenancy.forms import TenancyForm\nfrom tenancy.models import Tenant\nfrom utilities.forms import (\n add_blank_choice, APISelect, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect,\n ChainedFieldsMixin, ChainedModelChoiceField, ChainedModelMultipleChoiceField, CommentField, ComponentForm,\n ConfirmationForm, CSVChoiceField, ExpandableNameField, FilterChoiceField, SlugField, SmallTextarea,\n)\nfrom .constants import STATUS_CHOICES\nfrom .models import Cluster, ClusterGroup, ClusterType, VirtualMachine\n\n\n#\n# Cluster types\n#\n\nclass ClusterTypeForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n\n class Meta:\n model = ClusterType\n fields = ['name', 'slug']\n\n\n#\n# Cluster groups\n#\n\nclass ClusterGroupForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n\n class Meta:\n model = ClusterGroup\n fields = ['name', 'slug']\n\n\n#\n# Clusters\n#\n\nclass ClusterForm(BootstrapMixin, CustomFieldForm):\n\n class Meta:\n model = Cluster\n fields = ['name', 'type', 'group']\n\n\nclass ClusterCSVForm(forms.ModelForm):\n type = forms.ModelChoiceField(\n queryset=ClusterType.objects.all(),\n to_field_name='name',\n help_text='Name of cluster type',\n error_messages={\n 'invalid_choice': 'Invalid cluster type name.',\n }\n )\n group = forms.ModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n to_field_name='name',\n required=False,\n help_text='Name of cluster group',\n error_messages={\n 'invalid_choice': 'Invalid cluster group name.',\n }\n )\n\n class Meta:\n model = Cluster\n fields = ['name', 'type', 'group', 'comments']\n\n\nclass ClusterBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Cluster.objects.all(), widget=forms.MultipleHiddenInput)\n type = forms.ModelChoiceField(queryset=ClusterType.objects.all(), required=False)\n group = forms.ModelChoiceField(queryset=ClusterGroup.objects.all(), required=False)\n comments = CommentField(widget=SmallTextarea)\n\n class Meta:\n nullable_fields = ['group', 'comments']\n\n\nclass ClusterFilterForm(BootstrapMixin, CustomFieldFilterForm):\n model = Cluster\n q = forms.CharField(required=False, label='Search')\n group = FilterChoiceField(\n queryset=ClusterGroup.objects.annotate(filter_count=Count('clusters')),\n to_field_name='slug',\n null_option=(0, 'None'),\n required=False,\n )\n type = FilterChoiceField(\n queryset=ClusterType.objects.annotate(filter_count=Count('clusters')),\n to_field_name='slug',\n required=False,\n )\n\n\nclass ClusterAddDevicesForm(BootstrapMixin, ChainedFieldsMixin, forms.Form):\n region = TreeNodeChoiceField(\n queryset=Region.objects.all(),\n required=False,\n widget=forms.Select(\n attrs={'filter-for': 'site', 'nullable': 'true'}\n )\n )\n site = ChainedModelChoiceField(\n queryset=Site.objects.all(),\n chains=(\n ('region', 'region'),\n ),\n required=False,\n widget=APISelect(\n api_url='/api/dcim/sites/?region_id={{region}}',\n attrs={'filter-for': 'rack'}\n )\n )\n rack = ChainedModelChoiceField(\n queryset=Rack.objects.all(),\n chains=(\n ('site', 'site'),\n ),\n 
required=False,\n widget=APISelect(\n api_url='/api/dcim/racks/?site_id={{site}}',\n attrs={'filter-for': 'devices', 'nullable': 'true'}\n )\n )\n devices = ChainedModelMultipleChoiceField(\n queryset=Device.objects.filter(cluster__isnull=True),\n chains=(\n ('site', 'site'),\n ('rack', 'rack'),\n ),\n label='Device',\n widget=APISelectMultiple(\n api_url='/api/dcim/devices/?site_id={{site}}&rack_id={{rack}}',\n display_field='display_name',\n disabled_indicator='cluster'\n )\n )\n\n class Meta:\n fields = ['region', 'site', 'rack', 'devices']\n\n def __init__(self, *args, **kwargs):\n\n super(ClusterAddDevicesForm, self).__init__(*args, **kwargs)\n\n self.fields['devices'].choices = []\n\n\nclass ClusterRemoveDevicesForm(ConfirmationForm):\n pk = forms.ModelMultipleChoiceField(queryset=Device.objects.all(), widget=forms.MultipleHiddenInput)\n\n\n#\n# Virtual Machines\n#\n\nclass VirtualMachineForm(BootstrapMixin, TenancyForm, CustomFieldForm):\n cluster_group = forms.ModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n widget=forms.Select(\n attrs={'filter-for': 'cluster', 'nullable': 'true'}\n )\n )\n cluster = ChainedModelChoiceField(\n queryset=Cluster.objects.all(),\n chains=(\n ('group', 'cluster_group'),\n ),\n widget=APISelect(\n api_url='/api/virtualization/clusters/?group_id={{cluster_group}}'\n )\n )\n\n class Meta:\n model = VirtualMachine\n fields = [\n 'name', 'status', 'cluster_group', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments',\n ]\n\n def __init__(self, *args, **kwargs):\n\n # Initialize helper selector\n instance = kwargs.get('instance')\n if instance.pk and instance.cluster is not None:\n initial = kwargs.get('initial', {}).copy()\n initial['cluster_group'] = instance.cluster.group\n kwargs['initial'] = initial\n\n super(VirtualMachineForm, self).__init__(*args, **kwargs)\n\n\nclass VirtualMachineCSVForm(forms.ModelForm):\n status = CSVChoiceField(\n choices=STATUS_CHOICES,\n required=False,\n help_text='Operational status of device'\n )\n cluster = forms.ModelChoiceField(\n queryset=Cluster.objects.all(),\n to_field_name='name',\n help_text='Name of parent cluster',\n error_messages={\n 'invalid_choice': 'Invalid cluster name.',\n }\n )\n tenant = forms.ModelChoiceField(\n queryset=Tenant.objects.all(),\n required=False,\n to_field_name='name',\n help_text='Name of assigned tenant',\n error_messages={\n 'invalid_choice': 'Tenant not found.'\n }\n )\n platform = forms.ModelChoiceField(\n queryset=Platform.objects.all(),\n required=False,\n to_field_name='name',\n help_text='Name of assigned platform',\n error_messages={\n 'invalid_choice': 'Invalid platform.',\n }\n )\n\n class Meta:\n model = VirtualMachine\n fields = ['name', 'status', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments']\n\n\nclass VirtualMachineBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.MultipleHiddenInput)\n status = forms.ChoiceField(choices=add_blank_choice(STATUS_CHOICES), required=False, initial='')\n cluster = forms.ModelChoiceField(queryset=Cluster.objects.all(), required=False)\n tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)\n platform = forms.ModelChoiceField(queryset=Platform.objects.all(), required=False)\n vcpus = forms.IntegerField(required=False, label='vCPUs')\n memory = forms.IntegerField(required=False, label='Memory (MB)')\n disk = forms.IntegerField(required=False, label='Disk 
(GB)')\n comments = CommentField(widget=SmallTextarea)\n\n class Meta:\n nullable_fields = ['tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments']\n\n\ndef vm_status_choices():\n status_counts = {}\n for status in VirtualMachine.objects.values('status').annotate(count=Count('status')).order_by('status'):\n status_counts[status['status']] = status['count']\n return [(s[0], '{} ({})'.format(s[1], status_counts.get(s[0], 0))) for s in STATUS_CHOICES]\n\n\nclass VirtualMachineFilterForm(BootstrapMixin, CustomFieldFilterForm):\n model = VirtualMachine\n q = forms.CharField(required=False, label='Search')\n cluster_group = FilterChoiceField(\n queryset=ClusterGroup.objects.all(),\n to_field_name='slug',\n null_option=(0, 'None'),\n )\n cluster_id = FilterChoiceField(\n queryset=Cluster.objects.annotate(filter_count=Count('virtual_machines')),\n label='Cluster'\n )\n status = forms.MultipleChoiceField(choices=vm_status_choices, required=False)\n\n\n#\n# VM interfaces\n#\n\nclass InterfaceForm(BootstrapMixin, forms.ModelForm):\n\n class Meta:\n model = Interface\n fields = ['virtual_machine', 'name', 'form_factor', 'enabled', 'mac_address', 'mtu', 'description']\n widgets = {\n 'virtual_machine': forms.HiddenInput(),\n 'form_factor': forms.HiddenInput(),\n }\n\n\nclass InterfaceCreateForm(ComponentForm):\n name_pattern = ExpandableNameField(label='Name')\n form_factor = forms.ChoiceField(choices=VIFACE_FF_CHOICES, initial=IFACE_FF_VIRTUAL, widget=forms.HiddenInput())\n enabled = forms.BooleanField(required=False)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n mac_address = MACAddressFormField(required=False, label='MAC Address')\n description = forms.CharField(max_length=100, required=False)\n\n def __init__(self, *args, **kwargs):\n\n # Set interfaces enabled by default\n kwargs['initial'] = kwargs.get('initial', {}).copy()\n kwargs['initial'].update({'enabled': True})\n\n super(InterfaceCreateForm, self).__init__(*args, **kwargs)\n\n\nclass InterfaceBulkEditForm(BootstrapMixin, BulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Interface.objects.all(), widget=forms.MultipleHiddenInput)\n virtual_machine = forms.ModelChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.HiddenInput)\n enabled = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n description = forms.CharField(max_length=100, required=False)\n\n class Meta:\n nullable_fields = ['mtu', 'description']\n\n\n#\n# Bulk VirtualMachine component creation\n#\n\nclass VirtualMachineBulkAddComponentForm(BootstrapMixin, forms.Form):\n pk = forms.ModelMultipleChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.MultipleHiddenInput)\n name_pattern = ExpandableNameField(label='Name')\n\n\nclass VirtualMachineBulkAddInterfaceForm(VirtualMachineBulkAddComponentForm):\n form_factor = forms.ChoiceField(choices=VIFACE_FF_CHOICES, initial=IFACE_FF_VIRTUAL, widget=forms.HiddenInput())\n enabled = forms.BooleanField(required=False, initial=True)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n description = forms.CharField(max_length=100, required=False)\n",
"path": "netbox/virtualization/forms.py"
}
] | [
{
"content": "from __future__ import unicode_literals\n\nfrom mptt.forms import TreeNodeChoiceField\n\nfrom django import forms\nfrom django.db.models import Count\n\nfrom dcim.constants import VIFACE_FF_CHOICES\nfrom dcim.formfields import MACAddressFormField\nfrom dcim.models import Device, Interface, Platform, Rack, Region, Site\nfrom extras.forms import CustomFieldBulkEditForm, CustomFieldForm, CustomFieldFilterForm\nfrom tenancy.forms import TenancyForm\nfrom tenancy.models import Tenant\nfrom utilities.forms import (\n add_blank_choice, APISelect, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect,\n ChainedFieldsMixin, ChainedModelChoiceField, ChainedModelMultipleChoiceField, CommentField, ComponentForm,\n ConfirmationForm, CSVChoiceField, ExpandableNameField, FilterChoiceField, SlugField, SmallTextarea,\n)\nfrom .constants import STATUS_CHOICES\nfrom .models import Cluster, ClusterGroup, ClusterType, VirtualMachine\n\n\n#\n# Cluster types\n#\n\nclass ClusterTypeForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n\n class Meta:\n model = ClusterType\n fields = ['name', 'slug']\n\n\n#\n# Cluster groups\n#\n\nclass ClusterGroupForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n\n class Meta:\n model = ClusterGroup\n fields = ['name', 'slug']\n\n\n#\n# Clusters\n#\n\nclass ClusterForm(BootstrapMixin, CustomFieldForm):\n comments = CommentField(widget=SmallTextarea)\n\n class Meta:\n model = Cluster\n fields = ['name', 'type', 'group', 'comments']\n\n\nclass ClusterCSVForm(forms.ModelForm):\n type = forms.ModelChoiceField(\n queryset=ClusterType.objects.all(),\n to_field_name='name',\n help_text='Name of cluster type',\n error_messages={\n 'invalid_choice': 'Invalid cluster type name.',\n }\n )\n group = forms.ModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n to_field_name='name',\n required=False,\n help_text='Name of cluster group',\n error_messages={\n 'invalid_choice': 'Invalid cluster group name.',\n }\n )\n\n class Meta:\n model = Cluster\n fields = ['name', 'type', 'group', 'comments']\n\n\nclass ClusterBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Cluster.objects.all(), widget=forms.MultipleHiddenInput)\n type = forms.ModelChoiceField(queryset=ClusterType.objects.all(), required=False)\n group = forms.ModelChoiceField(queryset=ClusterGroup.objects.all(), required=False)\n comments = CommentField(widget=SmallTextarea)\n\n class Meta:\n nullable_fields = ['group', 'comments']\n\n\nclass ClusterFilterForm(BootstrapMixin, CustomFieldFilterForm):\n model = Cluster\n q = forms.CharField(required=False, label='Search')\n group = FilterChoiceField(\n queryset=ClusterGroup.objects.annotate(filter_count=Count('clusters')),\n to_field_name='slug',\n null_option=(0, 'None'),\n required=False,\n )\n type = FilterChoiceField(\n queryset=ClusterType.objects.annotate(filter_count=Count('clusters')),\n to_field_name='slug',\n required=False,\n )\n\n\nclass ClusterAddDevicesForm(BootstrapMixin, ChainedFieldsMixin, forms.Form):\n region = TreeNodeChoiceField(\n queryset=Region.objects.all(),\n required=False,\n widget=forms.Select(\n attrs={'filter-for': 'site', 'nullable': 'true'}\n )\n )\n site = ChainedModelChoiceField(\n queryset=Site.objects.all(),\n chains=(\n ('region', 'region'),\n ),\n required=False,\n widget=APISelect(\n api_url='/api/dcim/sites/?region_id={{region}}',\n attrs={'filter-for': 'rack'}\n )\n )\n rack = ChainedModelChoiceField(\n queryset=Rack.objects.all(),\n 
chains=(\n ('site', 'site'),\n ),\n required=False,\n widget=APISelect(\n api_url='/api/dcim/racks/?site_id={{site}}',\n attrs={'filter-for': 'devices', 'nullable': 'true'}\n )\n )\n devices = ChainedModelMultipleChoiceField(\n queryset=Device.objects.filter(cluster__isnull=True),\n chains=(\n ('site', 'site'),\n ('rack', 'rack'),\n ),\n label='Device',\n widget=APISelectMultiple(\n api_url='/api/dcim/devices/?site_id={{site}}&rack_id={{rack}}',\n display_field='display_name',\n disabled_indicator='cluster'\n )\n )\n\n class Meta:\n fields = ['region', 'site', 'rack', 'devices']\n\n def __init__(self, *args, **kwargs):\n\n super(ClusterAddDevicesForm, self).__init__(*args, **kwargs)\n\n self.fields['devices'].choices = []\n\n\nclass ClusterRemoveDevicesForm(ConfirmationForm):\n pk = forms.ModelMultipleChoiceField(queryset=Device.objects.all(), widget=forms.MultipleHiddenInput)\n\n\n#\n# Virtual Machines\n#\n\nclass VirtualMachineForm(BootstrapMixin, TenancyForm, CustomFieldForm):\n cluster_group = forms.ModelChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n widget=forms.Select(\n attrs={'filter-for': 'cluster', 'nullable': 'true'}\n )\n )\n cluster = ChainedModelChoiceField(\n queryset=Cluster.objects.all(),\n chains=(\n ('group', 'cluster_group'),\n ),\n widget=APISelect(\n api_url='/api/virtualization/clusters/?group_id={{cluster_group}}'\n )\n )\n\n class Meta:\n model = VirtualMachine\n fields = [\n 'name', 'status', 'cluster_group', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments',\n ]\n\n def __init__(self, *args, **kwargs):\n\n # Initialize helper selector\n instance = kwargs.get('instance')\n if instance.pk and instance.cluster is not None:\n initial = kwargs.get('initial', {}).copy()\n initial['cluster_group'] = instance.cluster.group\n kwargs['initial'] = initial\n\n super(VirtualMachineForm, self).__init__(*args, **kwargs)\n\n\nclass VirtualMachineCSVForm(forms.ModelForm):\n status = CSVChoiceField(\n choices=STATUS_CHOICES,\n required=False,\n help_text='Operational status of device'\n )\n cluster = forms.ModelChoiceField(\n queryset=Cluster.objects.all(),\n to_field_name='name',\n help_text='Name of parent cluster',\n error_messages={\n 'invalid_choice': 'Invalid cluster name.',\n }\n )\n tenant = forms.ModelChoiceField(\n queryset=Tenant.objects.all(),\n required=False,\n to_field_name='name',\n help_text='Name of assigned tenant',\n error_messages={\n 'invalid_choice': 'Tenant not found.'\n }\n )\n platform = forms.ModelChoiceField(\n queryset=Platform.objects.all(),\n required=False,\n to_field_name='name',\n help_text='Name of assigned platform',\n error_messages={\n 'invalid_choice': 'Invalid platform.',\n }\n )\n\n class Meta:\n model = VirtualMachine\n fields = ['name', 'status', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments']\n\n\nclass VirtualMachineBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.MultipleHiddenInput)\n status = forms.ChoiceField(choices=add_blank_choice(STATUS_CHOICES), required=False, initial='')\n cluster = forms.ModelChoiceField(queryset=Cluster.objects.all(), required=False)\n tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)\n platform = forms.ModelChoiceField(queryset=Platform.objects.all(), required=False)\n vcpus = forms.IntegerField(required=False, label='vCPUs')\n memory = forms.IntegerField(required=False, label='Memory (MB)')\n disk = 
forms.IntegerField(required=False, label='Disk (GB)')\n comments = CommentField(widget=SmallTextarea)\n\n class Meta:\n nullable_fields = ['tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments']\n\n\ndef vm_status_choices():\n status_counts = {}\n for status in VirtualMachine.objects.values('status').annotate(count=Count('status')).order_by('status'):\n status_counts[status['status']] = status['count']\n return [(s[0], '{} ({})'.format(s[1], status_counts.get(s[0], 0))) for s in STATUS_CHOICES]\n\n\nclass VirtualMachineFilterForm(BootstrapMixin, CustomFieldFilterForm):\n model = VirtualMachine\n q = forms.CharField(required=False, label='Search')\n cluster_group = FilterChoiceField(\n queryset=ClusterGroup.objects.all(),\n to_field_name='slug',\n null_option=(0, 'None'),\n )\n cluster_id = FilterChoiceField(\n queryset=Cluster.objects.annotate(filter_count=Count('virtual_machines')),\n label='Cluster'\n )\n status = forms.MultipleChoiceField(choices=vm_status_choices, required=False)\n\n\n#\n# VM interfaces\n#\n\nclass InterfaceForm(BootstrapMixin, forms.ModelForm):\n\n class Meta:\n model = Interface\n fields = ['virtual_machine', 'name', 'form_factor', 'enabled', 'mac_address', 'mtu', 'description']\n widgets = {\n 'virtual_machine': forms.HiddenInput(),\n }\n\n\nclass InterfaceCreateForm(ComponentForm):\n name_pattern = ExpandableNameField(label='Name')\n form_factor = forms.ChoiceField(choices=VIFACE_FF_CHOICES)\n enabled = forms.BooleanField(required=False)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n mac_address = MACAddressFormField(required=False, label='MAC Address')\n description = forms.CharField(max_length=100, required=False)\n\n def __init__(self, *args, **kwargs):\n\n # Set interfaces enabled by default\n kwargs['initial'] = kwargs.get('initial', {}).copy()\n kwargs['initial'].update({'enabled': True})\n\n super(InterfaceCreateForm, self).__init__(*args, **kwargs)\n\n\nclass InterfaceBulkEditForm(BootstrapMixin, BulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Interface.objects.all(), widget=forms.MultipleHiddenInput)\n virtual_machine = forms.ModelChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.HiddenInput)\n enabled = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n description = forms.CharField(max_length=100, required=False)\n\n class Meta:\n nullable_fields = ['mtu', 'description']\n\n\n#\n# Bulk VirtualMachine component creation\n#\n\nclass VirtualMachineBulkAddComponentForm(BootstrapMixin, forms.Form):\n pk = forms.ModelMultipleChoiceField(queryset=VirtualMachine.objects.all(), widget=forms.MultipleHiddenInput)\n name_pattern = ExpandableNameField(label='Name')\n\n\nclass VirtualMachineBulkAddInterfaceForm(VirtualMachineBulkAddComponentForm):\n form_factor = forms.ChoiceField(choices=VIFACE_FF_CHOICES)\n enabled = forms.BooleanField(required=False, initial=True)\n mtu = forms.IntegerField(required=False, min_value=1, max_value=32767, label='MTU')\n description = forms.CharField(max_length=100, required=False)\n",
"path": "netbox/virtualization/forms.py"
}
] | diff --git a/netbox/virtualization/forms.py b/netbox/virtualization/forms.py
index 73d6e7445a9..0d247c303ed 100644
--- a/netbox/virtualization/forms.py
+++ b/netbox/virtualization/forms.py
@@ -49,10 +49,11 @@ class Meta:
#
class ClusterForm(BootstrapMixin, CustomFieldForm):
+ comments = CommentField(widget=SmallTextarea)
class Meta:
model = Cluster
- fields = ['name', 'type', 'group']
+ fields = ['name', 'type', 'group', 'comments']
class ClusterCSVForm(forms.ModelForm):
|
qtile__qtile-4228 | Configured keyboards limited to 3
### The issue:
In my config I've defined 4 keyboard layouts; however, the widget on the bar never reaches the fourth layout when left-clicking. When the third layout is removed (moving the fourth into the third position) it is suddenly accessible, so it's not a problem with the layout itself. I don't see anything in the logs that would apply to this.
```
widget.KeyboardLayout(
font = defaults['font'],
fontsize = defaults['fontsize'],
configured_keyboards = ['us', 'es', 'semimak-jq', 'mtgap'],
display_map = { #makes everything lowercase
'us': 'us',
'es': 'es',
'workman': 'wm',
'semimak': 'sm',
'mtgap': 'mt',
}
),
```
From my config, I defined us, es, semimak-jq, and mtgap, but I can never rotate onto mtgap unless I remove semimak-jq. When I manually set the layout with setxkbmap, it is correctly displayed as 'mt' in the widget; I just can't rotate onto it via left-click on the widget.
Qtile Version: 0.22.1, X11
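
A minimal sketch of the suspected mechanism, assuming the current layout is read back from `setxkbmap -verbose 10 -query` and parsed with the `\w+`-based regex in the widget's X11 backend: a hyphenated name such as `semimak-jq` is truncated at the hyphen, so the widget never finds it in `configured_keyboards` and rotation falls back to the first entry instead of advancing.

```python
import re

# Regex used by the widget's X11 backend: \w+ does not include '-'.
kb_layout_regex = re.compile(r"layout:\s+(?P<layout>\w+)")

# Hypothetical line from `setxkbmap -verbose 10 -query` after selecting semimak-jq.
sample_output = "layout:     semimak-jq"

match = kb_layout_regex.search(sample_output)
print(match.group("layout"))  # prints "semimak", not "semimak-jq"

# "semimak" is not in configured_keyboards, so next_keyboard() falls back to the
# first configured layout instead of moving on to 'mtgap'.
```

With the `[\w-]+` pattern from the accompanying fix, the full `semimak-jq` string is captured and rotation can proceed to `mtgap`.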
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
| [
{
"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". 
In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>\\w+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Can not change the keyboard layout:\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available:\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n (using \"configured_keyboards\") and bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile with the X11 backend, this widget requires setxkbmap to be available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. 
Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n",
"path": "libqtile/widget/keyboardlayout.py"
}
] | [
{
"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". 
In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>[\\w-]+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Can not change the keyboard layout:\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available:\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n (using \"configured_keyboards\") and bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile with the X11 backend, this widget requires setxkbmap to be available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. 
Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n",
"path": "libqtile/widget/keyboardlayout.py"
}
] | diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py
index 102db17dd9..057875b864 100644
--- a/libqtile/widget/keyboardlayout.py
+++ b/libqtile/widget/keyboardlayout.py
@@ -60,7 +60,7 @@ def set_keyboard(self, layout: str, options: str | None) -> None:
class _X11LayoutBackend(_BaseLayoutBackend):
- kb_layout_regex = re.compile(r"layout:\s+(?P<layout>\w+)")
+ kb_layout_regex = re.compile(r"layout:\s+(?P<layout>[\w-]+)")
kb_variant_regex = re.compile(r"variant:\s+(?P<variant>\w+)")
def get_keyboard(self) -> str:
|
mabel-dev__opteryx-1689 | 🪲 VIEWs load error should be in debug mode only
### Thank you for taking the time to report a problem with Opteryx.
_To help us respond to your request, we ask that you try to provide the detail below about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
| [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.planner.logical_planner import LogicalPlan\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. {err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n",
"path": "opteryx/planner/views/__init__.py"
}
] | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.planner.logical_planner import LogicalPlan\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n # DEBUG:: log (f\"[OPTERYX] Unable to open views definition file. {err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n",
"path": "opteryx/planner/views/__init__.py"
}
] | diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py
index a5187a557..54c77f303 100644
--- a/opteryx/planner/views/__init__.py
+++ b/opteryx/planner/views/__init__.py
@@ -20,7 +20,7 @@ def _load_views():
with open("views.json", "rb") as defs:
return orjson.loads(defs.read())
except Exception as err:
- print(f"[OPTERYX] Unable to open views definition file. {err}")
+ # DEBUG:: log (f"[OPTERYX] Unable to open views definition file. {err}")
return {}
|
AlexsLemonade__refinebio-471 | Expose transformation option to API
### Context
https://github.com/AlexsLemonade/refinebio-frontend/issues/208
### Problem or idea
We want a dropdown to change the transformation option, but the API currently doesn't support changing that value.
### Solution or next step
I think transformation just needs to be added to the DataSetSerializer
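A minimal sketch of that next step, assuming the `Dataset` model stores the transformation option in a `scale_by` field (the field name is an assumption); the serializer only needs to list it so clients can read and update the value:
```python
# Hypothetical sketch, not the shipped change: expose the transformation option
# on the existing DatasetSerializer by adding the corresponding model field.
from rest_framework import serializers

from data_refinery_common.models import Dataset


class DatasetSerializer(serializers.ModelSerializer):
    start = serializers.NullBooleanField(required=False)

    class Meta:
        model = Dataset
        fields = (
            'id',
            'data',
            'aggregate_by',
            'scale_by',  # transformation option exposed to the API (field name assumed)
            'email_address',
            # ... remaining fields and read-only extra_kwargs unchanged ...
        )
```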
### New Issue Checklist
- [x] The title is short and descriptive.
- [x] You have explained the context that led you to write this issue.
- [x] You have reported a problem or idea.
- [x] You have proposed a solution or next step.
| [
{
"content": "from rest_framework import serializers\nfrom rest_framework_hstore.fields import HStoreField\nfrom data_refinery_common.models import ProcessorJob, DownloaderJob, SurveyJob\nfrom data_refinery_common.models import (\n Experiment,\n ExperimentAnnotation,\n Sample,\n SampleAnnotation,\n Organism,\n OrganismIndex,\n OriginalFile,\n Processor,\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n Dataset,\n APIToken\n)\n\n##\n# Organism\n##\n\nclass OrganismSerializer(serializers.ModelSerializer):\n class Meta:\n model = Organism\n fields = (\n 'name',\n 'taxonomy_id',\n )\n\n\n##\n# Processor\n##\n\nclass ProcessorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Processor\n fields = (\n 'name',\n 'docker_image',\n 'environment'\n )\n\n\n##\n# Transcriptome Index\n##\n\nclass OrganismIndexSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrganismIndex\n fields = (\n 's3_url',\n 'source_version',\n 'salmon_version',\n 'last_modified',\n )\n\n##\n# Results\n##\n\nclass ComputationalResultAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ComputationalResultAnnotation\n fields = (\n 'id',\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = ComputedFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputationalResultSerializer(serializers.ModelSerializer):\n annotations = ComputationalResultAnnotationSerializer(many=True, source='computationalresultannotation_set')\n files = ComputedFileSerializer(many=True, source='computedfile_set')\n\n class Meta:\n model = ComputationalResult\n fields = (\n 'id',\n 'commands',\n 'processor',\n 'is_ccdl',\n 'annotations',\n 'files',\n 'time_start',\n 'time_end',\n 'created_at',\n 'last_modified'\n )\n\n\n##\n# Samples\n##\n\nclass SampleSerializer(serializers.ModelSerializer):\n organism = OrganismSerializer(many=False)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\nclass SampleAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = SampleAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedSampleSerializer(serializers.ModelSerializer):\n annotations = SampleAnnotationSerializer(many=True, source='sampleannotation_set')\n organism = OrganismSerializer(many=False)\n results = ComputationalResultSerializer(many=True)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'annotations',\n 'results',\n 'pipelines',\n 'source_archive_url',\n 'has_raw',\n 'sex',\n 'age',\n 'specimen_part',\n 'genotype',\n 'disease',\n 'disease_stage',\n 'cell_line',\n 'treatment',\n 'race',\n 'subject',\n 'compound',\n 'time',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\n##\n# Experiments\n##\n\nclass ExperimentSerializer(serializers.ModelSerializer):\n organisms = serializers.StringRelatedField(many=True)\n platforms = 
serializers.ReadOnlyField()\n samples = serializers.StringRelatedField(many=True)\n pretty_platforms = serializers.ReadOnlyField()\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'platforms',\n 'pretty_platforms',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'samples',\n 'organisms',\n 'submitter_institution',\n 'created_at',\n 'last_modified'\n )\n\nclass ExperimentAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ExperimentAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedExperimentSerializer(serializers.ModelSerializer):\n annotations = ExperimentAnnotationSerializer(many=True, source='experimentannotation_set')\n samples = SampleSerializer(many=True)\n organisms = OrganismSerializer(many=True)\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'annotations',\n 'samples',\n 'protocol_description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'source_first_published',\n 'source_last_modified',\n 'submitter_institution',\n 'last_modified',\n 'created_at',\n 'organisms',\n )\n\nclass PlatformSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Sample\n fields = (\n 'platform_accession_code',\n 'platform_name',\n )\n\nclass InstitutionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Experiment\n fields = (\n 'submitter_institution',\n )\n\n##\n# Files\n##\n\nclass OriginalFileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = OriginalFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 'source_url',\n 'source_filename',\n 'is_downloaded',\n 'is_archive',\n 'has_raw',\n 'is_downloaded',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Jobs\n##\n\nclass SurveyJobSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SurveyJob\n fields = (\n 'id',\n 'source_type',\n 'success',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass DownloaderJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = DownloaderJob\n fields = (\n 'id',\n 'downloader_task',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass ProcessorJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = ProcessorJob\n fields = (\n 'id',\n 'pipeline_applied',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Datasets\n##\n\ndef validate_dataset(data):\n \"\"\" Basic dataset validation. Currently only checks formatting, not values. \"\"\"\n if data['data'] != None:\n if type(data['data']) != dict:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\n for key, value in data['data'].items():\n if type(value) != list:\n raise serializers.ValidationError(\"`data` must be a dict of lists. 
Problem with `\" + str(key) + \"`\")\n\n try:\n if len(value) != len(set(value)):\n raise serializers.ValidationError(\"Duplicate values detected in \" + str(value))\n except Exception as e:\n raise serializers.ValidationError(\"Received bad dataset data: \" + str(e))\n\n else:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\nclass CreateDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'email_address'\n )\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass DatasetSerializer(serializers.ModelSerializer):\n\n start = serializers.NullBooleanField(required=False)\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'aggregate_by',\n 'is_processing',\n 'is_processed',\n 'is_available',\n 'email_address',\n 'expires_on',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified',\n 'start'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True,\n },\n 'is_processing': {\n 'read_only': True,\n },\n 'is_processed': {\n 'read_only': True,\n },\n 'is_available': {\n 'read_only': True,\n },\n 'expires_on': {\n 'read_only': True,\n },\n 's3_bucket': {\n 'read_only': True,\n },\n 's3_key': {\n 'read_only': True,\n },\n 'created_at': {\n 'read_only': True,\n },\n 'last_modified': {\n 'read_only': True,\n }\n }\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass APITokenSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = APIToken\n fields = (\n 'id',\n 'is_activated',\n 'terms_and_conditions'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True\n },\n 'is_activated': {\n 'read_only': False\n },\n 'terms_and_conditions': {\n 'read_only': True\n }\n }\n",
"path": "api/data_refinery_api/serializers.py"
}
] | [
{
"content": "from rest_framework import serializers\nfrom rest_framework_hstore.fields import HStoreField\nfrom data_refinery_common.models import ProcessorJob, DownloaderJob, SurveyJob\nfrom data_refinery_common.models import (\n Experiment,\n ExperimentAnnotation,\n Sample,\n SampleAnnotation,\n Organism,\n OrganismIndex,\n OriginalFile,\n Processor,\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n Dataset,\n APIToken\n)\n\n##\n# Organism\n##\n\nclass OrganismSerializer(serializers.ModelSerializer):\n class Meta:\n model = Organism\n fields = (\n 'name',\n 'taxonomy_id',\n )\n\n\n##\n# Processor\n##\n\nclass ProcessorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Processor\n fields = (\n 'name',\n 'docker_image',\n 'environment'\n )\n\n\n##\n# Transcriptome Index\n##\n\nclass OrganismIndexSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrganismIndex\n fields = (\n 's3_url',\n 'source_version',\n 'salmon_version',\n 'last_modified',\n )\n\n##\n# Results\n##\n\nclass ComputationalResultAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ComputationalResultAnnotation\n fields = (\n 'id',\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = ComputedFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputationalResultSerializer(serializers.ModelSerializer):\n annotations = ComputationalResultAnnotationSerializer(many=True, source='computationalresultannotation_set')\n files = ComputedFileSerializer(many=True, source='computedfile_set')\n\n class Meta:\n model = ComputationalResult\n fields = (\n 'id',\n 'commands',\n 'processor',\n 'is_ccdl',\n 'annotations',\n 'files',\n 'time_start',\n 'time_end',\n 'created_at',\n 'last_modified'\n )\n\n\n##\n# Samples\n##\n\nclass SampleSerializer(serializers.ModelSerializer):\n organism = OrganismSerializer(many=False)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\nclass SampleAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = SampleAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedSampleSerializer(serializers.ModelSerializer):\n annotations = SampleAnnotationSerializer(many=True, source='sampleannotation_set')\n organism = OrganismSerializer(many=False)\n results = ComputationalResultSerializer(many=True)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'annotations',\n 'results',\n 'pipelines',\n 'source_archive_url',\n 'has_raw',\n 'sex',\n 'age',\n 'specimen_part',\n 'genotype',\n 'disease',\n 'disease_stage',\n 'cell_line',\n 'treatment',\n 'race',\n 'subject',\n 'compound',\n 'time',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\n##\n# Experiments\n##\n\nclass ExperimentSerializer(serializers.ModelSerializer):\n organisms = serializers.StringRelatedField(many=True)\n platforms = 
serializers.ReadOnlyField()\n samples = serializers.StringRelatedField(many=True)\n pretty_platforms = serializers.ReadOnlyField()\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'platforms',\n 'pretty_platforms',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'samples',\n 'organisms',\n 'submitter_institution',\n 'created_at',\n 'last_modified'\n )\n\nclass ExperimentAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ExperimentAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedExperimentSerializer(serializers.ModelSerializer):\n annotations = ExperimentAnnotationSerializer(many=True, source='experimentannotation_set')\n samples = SampleSerializer(many=True)\n organisms = OrganismSerializer(many=True)\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'annotations',\n 'samples',\n 'protocol_description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'source_first_published',\n 'source_last_modified',\n 'submitter_institution',\n 'last_modified',\n 'created_at',\n 'organisms',\n )\n\nclass PlatformSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Sample\n fields = (\n 'platform_accession_code',\n 'platform_name',\n )\n\nclass InstitutionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Experiment\n fields = (\n 'submitter_institution',\n )\n\n##\n# Files\n##\n\nclass OriginalFileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = OriginalFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 'source_url',\n 'source_filename',\n 'is_downloaded',\n 'is_archive',\n 'has_raw',\n 'is_downloaded',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Jobs\n##\n\nclass SurveyJobSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SurveyJob\n fields = (\n 'id',\n 'source_type',\n 'success',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass DownloaderJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = DownloaderJob\n fields = (\n 'id',\n 'downloader_task',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass ProcessorJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = ProcessorJob\n fields = (\n 'id',\n 'pipeline_applied',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Datasets\n##\n\ndef validate_dataset(data):\n \"\"\" Basic dataset validation. Currently only checks formatting, not values. \"\"\"\n if data['data'] != None:\n if type(data['data']) != dict:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\n for key, value in data['data'].items():\n if type(value) != list:\n raise serializers.ValidationError(\"`data` must be a dict of lists. 
Problem with `\" + str(key) + \"`\")\n\n try:\n if len(value) != len(set(value)):\n raise serializers.ValidationError(\"Duplicate values detected in \" + str(value))\n except Exception as e:\n raise serializers.ValidationError(\"Received bad dataset data: \" + str(e))\n\n else:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\nclass CreateDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'email_address'\n )\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass DatasetSerializer(serializers.ModelSerializer):\n\n start = serializers.NullBooleanField(required=False)\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'aggregate_by',\n 'scale_by',\n 'is_processing',\n 'is_processed',\n 'is_available',\n 'email_address',\n 'expires_on',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified',\n 'start'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True,\n },\n 'is_processing': {\n 'read_only': True,\n },\n 'is_processed': {\n 'read_only': True,\n },\n 'is_available': {\n 'read_only': True,\n },\n 'expires_on': {\n 'read_only': True,\n },\n 's3_bucket': {\n 'read_only': True,\n },\n 's3_key': {\n 'read_only': True,\n },\n 'created_at': {\n 'read_only': True,\n },\n 'last_modified': {\n 'read_only': True,\n }\n }\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass APITokenSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = APIToken\n fields = (\n 'id',\n 'is_activated',\n 'terms_and_conditions'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True\n },\n 'is_activated': {\n 'read_only': False\n },\n 'terms_and_conditions': {\n 'read_only': True\n }\n }\n",
"path": "api/data_refinery_api/serializers.py"
}
] | diff --git a/api/data_refinery_api/serializers.py b/api/data_refinery_api/serializers.py
index be703bfcc..88e9a34ce 100644
--- a/api/data_refinery_api/serializers.py
+++ b/api/data_refinery_api/serializers.py
@@ -416,6 +416,7 @@ class Meta:
'id',
'data',
'aggregate_by',
+ 'scale_by',
'is_processing',
'is_processed',
'is_available',
|
internetarchive__openlibrary-6157 | Include full url in "Report a problem" emails
### Describe the problem that you'd like solved
Currently the "Report a problem" form sends the user's URL, but doesn't include any parameters. So staff only sees e.g. `/search` instead of `/search?q=harry+potter`. This makes it harder to debug issues.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
Currently, the `Report a Problem` link in the footer includes the path, but not the parameters!
Related code:
- The link in the footer: https://github.com/internetarchive/openlibrary/blob/8a1e17358c107e665bc653b689c3f50fb329f2c2/openlibrary/templates/lib/nav_foot.html#L44
- The spot this parameter is passed to the support form: https://github.com/internetarchive/openlibrary/blob/8a1e17358c107e665bc653b689c3f50fb329f2c2/openlibrary/templates/support.html#L54-L56
- The form's post endpoint: https://github.com/internetarchive/openlibrary/blob/39962157c12465d050e43458444aba9a59661c8d/openlibrary/plugins/openlibrary/support.py#L65
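
As an illustration, here is a minimal sketch (not the actual change) of how the full request URL could be surfaced to templates. It assumes web.py's `web.ctx.fullpath`, which is the path plus the query string; the template-global `Request` helper and the `url_quote` helper already exist in `openlibrary/plugins/upstream/utils.py`:

```python
import web  # web.py; note web.ctx is only populated inside a request


class Request:
    """Template-facing request helper (sketch of the relevant properties only)."""

    # Path only, e.g. "/search"
    path = property(lambda self: web.ctx.path)
    # Path plus query string, e.g. "/search?q=harry+potter"
    fullpath = property(lambda self: web.ctx.fullpath)


# The footer link could then pass the full URL on to the support form,
# e.g. (illustrative template markup, not the real nav_foot.html):
#   <a href="/contact?path=$url_quote(request.fullpath)">Report A Problem</a>
```

With the full path available to the template, the support form's URL field (and therefore the email sent to staff) would include the query parameters as well as the path.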
### Stakeholders
@seabelis
| [
{
"content": "from typing import List, Union, Tuple, Any\n\nimport web\nimport json\nimport babel\nimport babel.core\nimport babel.dates\nfrom collections import defaultdict\nimport re\nimport random\nimport xml.etree.ElementTree as etree\nimport datetime\nimport logging\nfrom html.parser import HTMLParser\nfrom typing import Optional\n\nimport requests\n\nimport six\nfrom six.moves import urllib\nfrom six.moves.collections_abc import MutableMapping\nfrom six.moves.urllib.parse import (\n parse_qs,\n urlencode as parse_urlencode,\n urlparse,\n urlunparse,\n)\n\nfrom infogami import config\nfrom infogami.utils import view, delegate, stats\nfrom infogami.utils.view import render, get_template, public, query_param\nfrom infogami.utils.macro import macro\nfrom infogami.utils.context import context\nfrom infogami.infobase.client import Thing, Changeset, storify\n\nfrom openlibrary.core.helpers import commify, parse_datetime, truncate\nfrom openlibrary.core.middleware import GZipMiddleware\nfrom openlibrary.core import cache, ab\n\n\nclass MultiDict(MutableMapping):\n \"\"\"Ordered Dictionary that can store multiple values.\n\n >>> d = MultiDict()\n >>> d['x'] = 1\n >>> d['x'] = 2\n >>> d['y'] = 3\n >>> d['x']\n 2\n >>> d['y']\n 3\n >>> d['z']\n Traceback (most recent call last):\n ...\n KeyError: 'z'\n >>> list(d)\n ['x', 'x', 'y']\n >>> list(d.items())\n [('x', 1), ('x', 2), ('y', 3)]\n >>> list(d.multi_items())\n [('x', [1, 2]), ('y', [3])]\n \"\"\"\n\n def __init__(self, items=(), **kw):\n self._items = []\n\n for k, v in items:\n self[k] = v\n self.update(kw)\n\n def __getitem__(self, key):\n values = self.getall(key)\n if values:\n return values[-1]\n else:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n self._items.append((key, value))\n\n def __delitem__(self, key):\n self._items = [(k, v) for k, v in self._items if k != key]\n\n def __iter__(self):\n yield from self.keys()\n\n def __len__(self):\n return len(list(self.keys()))\n\n def getall(self, key):\n return [v for k, v in self._items if k == key]\n\n def keys(self):\n return [k for k, v in self._items]\n\n def values(self):\n return [v for k, v in self._items]\n\n def items(self):\n return self._items[:]\n\n def multi_items(self):\n \"\"\"Returns items as tuple of key and a list of values.\"\"\"\n items = []\n d = {}\n\n for k, v in self._items:\n if k not in d:\n d[k] = []\n items.append((k, d[k]))\n d[k].append(v)\n return items\n\n\n@macro\n@public\ndef render_template(name, *a, **kw):\n if \".\" in name:\n name = name.rsplit(\".\", 1)[0]\n return render[name](*a, **kw)\n\n\ndef kebab_case(upper_camel_case):\n \"\"\"\n :param str upper_camel_case: Text in upper camel case (e.g. \"HelloWorld\")\n :return: text in kebab case (e.g. 
'hello-world')\n\n >>> kebab_case('HelloWorld')\n 'hello-world'\n >>> kebab_case(\"MergeUI\")\n 'merge-u-i'\n \"\"\"\n parts = re.findall(r'[A-Z][^A-Z]*', upper_camel_case)\n return '-'.join(parts).lower()\n\n\n@public\ndef render_component(name, attrs=None, json_encode=True):\n \"\"\"\n :param str name: Name of the component (excluding extension)\n :param dict attrs: attributes to add to the component element\n \"\"\"\n from openlibrary.plugins.upstream.code import static_url\n\n attrs = attrs or {}\n attrs_str = ''\n for (key, val) in attrs.items():\n if json_encode and isinstance(val, dict) or isinstance(val, list):\n val = json.dumps(val)\n # On the Vue side use decodeURIComponent to decode\n val = urllib.parse.quote(val)\n attrs_str += f' {key}=\"{val}\"'\n html = ''\n included = web.ctx.setdefault(\"included-components\", [])\n\n if len(included) == 0:\n # Need to include Vue\n html += '<script src=\"%s\"></script>' % static_url('build/vue.js')\n\n if name not in included:\n url = static_url('build/components/production/ol-%s.min.js' % name)\n html += '<script src=\"%s\"></script>' % url\n included.append(name)\n\n html += '<ol-{name} {attrs}></ol-{name}>'.format(\n name=kebab_case(name),\n attrs=attrs_str,\n )\n return html\n\n\n@public\ndef get_error(name, *args):\n \"\"\"Return error with the given name from errors.tmpl template.\"\"\"\n return get_message_from_template(\"errors\", name, args)\n\n\n@public\ndef get_message(name, *args):\n \"\"\"Return message with given name from messages.tmpl template\"\"\"\n return get_message_from_template(\"messages\", name, args)\n\n\ndef get_message_from_template(template_name, name, args):\n d = render_template(template_name).get(\"messages\", {})\n msg = d.get(name) or name.lower().replace(\"_\", \" \")\n\n if msg and args:\n return msg % args\n else:\n return msg\n\n\n@public\ndef list_recent_pages(path, limit=100, offset=0):\n \"\"\"Lists all pages with name path/* in the order of last_modified.\"\"\"\n q = {}\n\n q['key~'] = path + '/*'\n # don't show /type/delete and /type/redirect\n q['a:type!='] = '/type/delete'\n q['b:type!='] = '/type/redirect'\n\n q['sort'] = 'key'\n q['limit'] = limit\n q['offset'] = offset\n q['sort'] = '-last_modified'\n # queries are very slow with != conditions\n # q['type'] != '/type/delete'\n return web.ctx.site.get_many(web.ctx.site.things(q))\n\n\n@public\ndef json_encode(d):\n return json.dumps(d)\n\n\ndef unflatten(d, separator=\"--\"):\n \"\"\"Convert flattened data into nested form.\n\n >>> unflatten({\"a\": 1, \"b--x\": 2, \"b--y\": 3, \"c--0\": 4, \"c--1\": 5})\n {'a': 1, 'c': [4, 5], 'b': {'y': 3, 'x': 2}}\n >>> unflatten({\"a--0--x\": 1, \"a--0--y\": 2, \"a--1--x\": 3, \"a--1--y\": 4})\n {'a': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}\n\n \"\"\"\n\n def isint(k):\n try:\n int(k)\n return True\n except ValueError:\n return False\n\n def setvalue(data, k, v):\n if '--' in k:\n k, k2 = k.split(separator, 1)\n setvalue(data.setdefault(k, {}), k2, v)\n else:\n data[k] = v\n\n def makelist(d):\n \"\"\"Convert d into a list if all the keys of d are integers.\"\"\"\n if isinstance(d, dict):\n if all(isint(k) for k in d):\n return [makelist(d[k]) for k in sorted(d, key=int)]\n else:\n return web.storage((k, makelist(v)) for k, v in d.items())\n else:\n return d\n\n d2 = {}\n for k, v in d.items():\n setvalue(d2, k, v)\n return makelist(d2)\n\n\ndef fuzzy_find(value, options, stopwords=None):\n stopwords = stopwords or []\n \"\"\"Try find the option nearest to the value.\n\n >>> fuzzy_find(\"O'Reilly\", 
[\"O'Reilly Inc\", \"Addison-Wesley\"])\n \"O'Reilly Inc\"\n \"\"\"\n if not options:\n return value\n\n rx = web.re_compile(r\"[-_\\.&, ]+\")\n\n # build word frequency\n d = defaultdict(list)\n for option in options:\n for t in rx.split(option):\n d[t].append(option)\n\n # find score for each option\n score = defaultdict(lambda: 0)\n for t in rx.split(value):\n if t.lower() in stopwords:\n continue\n for option in d[t]:\n score[option] += 1\n\n # take the option with maximum score\n return max(options, key=score.__getitem__)\n\n\n@public\ndef radio_input(checked=False, **params):\n params['type'] = 'radio'\n if checked:\n params['checked'] = \"checked\"\n return \"<input %s />\" % \" \".join(\n [f'{k}=\"{web.websafe(v)}\"' for k, v in params.items()]\n )\n\n\n@public\ndef radio_list(name, args, value):\n html = []\n for arg in args:\n if isinstance(arg, tuple):\n arg, label = arg\n else:\n label = arg\n html.append(radio_input())\n\n\ndef get_coverstore_url() -> str:\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n@public\ndef get_coverstore_public_url() -> str:\n return config.get('coverstore_public_url', get_coverstore_url()).rstrip('/')\n\n\ndef _get_changes_v1_raw(query, revision=None):\n \"\"\"Returns the raw versions response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n versions = web.ctx.site.versions(query)\n\n for v in versions:\n v.created = v.created.isoformat()\n v.author = v.author and v.author.key\n\n # XXX-Anand: hack to avoid too big data to be stored in memcache.\n # v.changes is not used and it contrinutes to memcache bloat in a big way.\n v.changes = '[]'\n\n return versions\n\n\ndef get_changes_v1(query, revision=None):\n # uses the cached function _get_changes_v1_raw to get the raw data\n # and processes to before returning.\n def process(v):\n v = web.storage(v)\n v.created = parse_datetime(v.created)\n v.author = v.author and web.ctx.site.get(v.author, lazy=True)\n return v\n\n return [process(v) for v in _get_changes_v1_raw(query, revision)]\n\n\ndef _get_changes_v2_raw(query, revision=None):\n \"\"\"Returns the raw recentchanges response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n changes = web.ctx.site.recentchanges(query)\n return [c.dict() for c in changes]\n\n\n# XXX-Anand: disabled temporarily to avoid too much memcache usage.\n# _get_changes_v2_raw = cache.memcache_memoize(_get_changes_v2_raw, key_prefix=\"upstream._get_changes_v2_raw\", timeout=10*60)\n\n\ndef get_changes_v2(query, revision=None):\n page = web.ctx.site.get(query['key'])\n\n def first(seq, default=None):\n try:\n return next(seq)\n except StopIteration:\n return default\n\n def process_change(change):\n change = Changeset.create(web.ctx.site, storify(change))\n change.thing = page\n change.key = page.key\n change.revision = first(c.revision for c in change.changes if c.key == page.key)\n change.created = change.timestamp\n\n change.get = change.__dict__.get\n change.get_comment = lambda: get_comment(change)\n change.machine_comment = change.data.get(\"machine_comment\")\n\n return change\n\n def get_comment(change):\n t = get_template(\"recentchanges/\" + change.kind + \"/comment\") or get_template(\n \"recentchanges/default/comment\"\n )\n return t(change, page)\n\n query['key'] = 
page.key\n changes = _get_changes_v2_raw(query, revision=page.revision)\n return [process_change(c) for c in changes]\n\n\ndef get_changes(query, revision=None):\n return get_changes_v2(query, revision=revision)\n\n\n@public\ndef get_history(page):\n h = web.storage(\n revision=page.revision, lastest_revision=page.revision, created=page.created\n )\n if h.revision < 5:\n h.recent = get_changes({\"key\": page.key, \"limit\": 5}, revision=page.revision)\n h.initial = h.recent[-1:]\n h.recent = h.recent[:-1]\n else:\n h.initial = get_changes(\n {\"key\": page.key, \"limit\": 1, \"offset\": h.revision - 1},\n revision=page.revision,\n )\n h.recent = get_changes({\"key\": page.key, \"limit\": 4}, revision=page.revision)\n\n return h\n\n\n@public\ndef get_version(key, revision):\n try:\n return web.ctx.site.versions({\"key\": key, \"revision\": revision, \"limit\": 1})[0]\n except IndexError:\n return None\n\n\n@public\ndef get_recent_author(doc):\n versions = get_changes_v1(\n {'key': doc.key, 'limit': 1, \"offset\": 0}, revision=doc.revision\n )\n if versions:\n return versions[0].author\n\n\n@public\ndef get_recent_accounts(limit=5, offset=0):\n versions = web.ctx.site.versions(\n {'type': '/type/user', 'revision': 1, 'limit': limit, 'offset': offset}\n )\n return web.ctx.site.get_many([v.key for v in versions])\n\n\ndef get_locale():\n try:\n return babel.Locale(web.ctx.get(\"lang\") or \"en\")\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\n@public\ndef process_version(v):\n \"\"\"Looks at the version and adds machine_comment required for showing \"View MARC\" link.\"\"\"\n comments = [\n \"found a matching marc record\",\n \"add publisher and source\",\n ]\n if v.key.startswith('/books/') and not v.get('machine_comment'):\n thing = v.get('thing') or web.ctx.site.get(v.key, v.revision)\n if (\n thing.source_records\n and v.revision == 1\n or (v.comment and v.comment.lower() in comments)\n ):\n marc = thing.source_records[-1]\n if marc.startswith('marc:'):\n v.machine_comment = marc[len(\"marc:\") :]\n else:\n v.machine_comment = marc\n return v\n\n\n@public\ndef is_thing(t):\n return isinstance(t, Thing)\n\n\n@public\ndef putctx(key, value):\n \"\"\"Save a value in the context.\"\"\"\n context[key] = value\n return \"\"\n\n\nclass Metatag:\n def __init__(self, tag=\"meta\", **attrs):\n self.tag = tag\n self.attrs = attrs\n\n def __str__(self):\n attrs = ' '.join(f'{k}=\"{websafe(v)}\"' for k, v in self.attrs.items())\n return f'<{self.tag} {attrs} />'\n\n def __repr__(self):\n return 'Metatag(%s)' % str(self)\n\n\n@public\ndef add_metatag(tag=\"meta\", **attrs):\n context.setdefault('metatags', [])\n context.metatags.append(Metatag(tag, **attrs))\n\n\n@public\ndef url_quote(text):\n if isinstance(text, str):\n text = text.encode('utf8')\n return urllib.parse.quote_plus(text)\n\n\n@public\ndef urlencode(dict_or_list_of_tuples: Union[dict, list[tuple[str, Any]]]) -> str:\n \"\"\"\n You probably want to use this, if you're looking to urlencode parameters. 
This will\n encode things to utf8 that would otherwise cause urlencode to error.\n \"\"\"\n from six.moves.urllib.parse import urlencode as og_urlencode\n\n tuples = dict_or_list_of_tuples\n if isinstance(dict_or_list_of_tuples, dict):\n tuples = dict_or_list_of_tuples.items()\n params = [(k, v.encode('utf-8') if isinstance(v, str) else v) for (k, v) in tuples]\n return og_urlencode(params)\n\n\n@public\ndef entity_decode(text):\n try:\n return six.moves.html_parser.unescape(text)\n except AttributeError:\n return six.moves.html_parser.HTMLParser().unescape(text)\n\n\n@public\ndef set_share_links(url='#', title='', view_context=None):\n \"\"\"\n Constructs list share links for social platforms and assigns to view context attribute\n\n Args (all required):\n url (str or unicode) - complete canonical url to page being shared\n title (str or unicode) - title of page being shared\n view_context (object that has/can-have share_links attribute)\n \"\"\"\n encoded_url = url_quote(url)\n text = url_quote(\"Check this out: \" + entity_decode(title))\n links = [\n {\n 'text': 'Facebook',\n 'url': 'https://www.facebook.com/sharer/sharer.php?u=' + encoded_url,\n },\n {\n 'text': 'Twitter',\n 'url': f'https://twitter.com/intent/tweet?url={encoded_url}&via=openlibrary&text={text}',\n },\n {\n 'text': 'Pinterest',\n 'url': f'https://pinterest.com/pin/create/link/?url={encoded_url}&description={text}',\n },\n ]\n view_context.share_links = links\n\n\ndef pad(seq, size, e=None):\n \"\"\"\n >>> pad([1, 2], 4, 0)\n [1, 2, 0, 0]\n \"\"\"\n seq = seq[:]\n while len(seq) < size:\n seq.append(e)\n return seq\n\n\ndef parse_toc_row(line):\n \"\"\"Parse one row of table of contents.\n\n >>> def f(text):\n ... d = parse_toc_row(text)\n ... return (d['level'], d['label'], d['title'], d['pagenum'])\n ...\n >>> f(\"* chapter 1 | Welcome to the real world! | 2\")\n (1, 'chapter 1', 'Welcome to the real world!', '2')\n >>> f(\"Welcome to the real world!\")\n (0, '', 'Welcome to the real world!', '')\n >>> f(\"** | Welcome to the real world! 
| 2\")\n (2, '', 'Welcome to the real world!', '2')\n >>> f(\"|Preface | 1\")\n (0, '', 'Preface', '1')\n >>> f(\"1.1 | Apple\")\n (0, '1.1', 'Apple', '')\n \"\"\"\n RE_LEVEL = web.re_compile(r\"(\\**)(.*)\")\n level, text = RE_LEVEL.match(line.strip()).groups()\n\n if \"|\" in text:\n tokens = text.split(\"|\", 2)\n label, title, page = pad(tokens, 3, '')\n else:\n title = text\n label = page = \"\"\n\n return web.storage(\n level=len(level), label=label.strip(), title=title.strip(), pagenum=page.strip()\n )\n\n\ndef parse_toc(text):\n \"\"\"Parses each line of toc\"\"\"\n if text is None:\n return []\n return [parse_toc_row(line) for line in text.splitlines() if line.strip(\" |\")]\n\n\n_languages = None\n\n\n@public\ndef get_languages():\n global _languages\n if _languages is None:\n keys = web.ctx.site.things(\n {\"type\": \"/type/language\", \"key~\": \"/languages/*\", \"limit\": 1000}\n )\n _languages = sorted(\n (\n web.storage(name=d.name, code=d.code, key=d.key)\n for d in web.ctx.site.get_many(keys)\n ),\n key=lambda d: d.name.lower(),\n )\n return _languages\n\n\n@public\ndef get_author_config():\n return _get_author_config()\n\n\[email protected]\ndef _get_author_config():\n \"\"\"Returns the author config.\n\n The results are cached on the first invocation.\n Any changes to /config/author page require restarting the app.\n\n \"\"\"\n thing = web.ctx.site.get('/config/author')\n if hasattr(thing, \"identifiers\"):\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n else:\n identifiers = {}\n return web.storage(identifiers=identifiers)\n\n\n@public\ndef get_edition_config():\n return _get_edition_config()\n\n\[email protected]\ndef _get_edition_config():\n \"\"\"Returns the edition config.\n\n The results are cached on the first invocation. 
Any changes to /config/edition page require restarting the app.\n\n This is is cached because fetching and creating the Thing object was taking about 20ms of time for each book request.\n \"\"\"\n thing = web.ctx.site.get('/config/edition')\n classifications = [\n web.storage(t.dict()) for t in thing.classifications if 'name' in t\n ]\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n roles = thing.roles\n return web.storage(\n classifications=classifications, identifiers=identifiers, roles=roles\n )\n\n\nfrom openlibrary.core.olmarkdown import OLMarkdown\n\n\ndef get_markdown(text, safe_mode=False):\n md = OLMarkdown(source=text, safe_mode=safe_mode)\n view._register_mdx_extensions(md)\n md.postprocessors += view.wiki_processors\n return md\n\n\nclass HTML(str):\n def __init__(self, html):\n str.__init__(self, web.safeunicode(html))\n\n def __repr__(self):\n return \"<html: %s>\" % str.__repr__(self)\n\n\n_websafe = web.websafe\n\n\ndef websafe(text):\n if isinstance(text, HTML):\n return text\n elif isinstance(text, web.template.TemplateResult):\n return web.safestr(text)\n else:\n return _websafe(text)\n\n\nfrom openlibrary.plugins.upstream import adapter\nfrom openlibrary.utils.olcompress import OLCompressor\nfrom openlibrary.utils import olmemcache\nimport memcache\n\n\nclass UpstreamMemcacheClient:\n \"\"\"Wrapper to memcache Client to handle upstream specific conversion and OL specific compression.\n Compatible with memcache Client API.\n \"\"\"\n\n def __init__(self, servers):\n self._client = memcache.Client(servers)\n compressor = OLCompressor()\n self.compress = compressor.compress\n\n def decompress(*args, **kw):\n d = json.loads(compressor.decompress(*args, **kw))\n return json.dumps(adapter.unconvert_dict(d))\n\n self.decompress = decompress\n\n def get(self, key):\n key = adapter.convert_key(key)\n if key is None:\n return None\n\n try:\n value = self._client.get(web.safestr(key))\n except memcache.Client.MemcachedKeyError:\n return None\n\n return value and self.decompress(value)\n\n def get_multi(self, keys):\n keys = [adapter.convert_key(k) for k in keys]\n keys = [web.safestr(k) for k in keys]\n\n d = self._client.get_multi(keys)\n return {\n web.safeunicode(adapter.unconvert_key(k)): self.decompress(v)\n for k, v in d.items()\n }\n\n\nif config.get('upstream_memcache_servers'):\n olmemcache.Client = UpstreamMemcacheClient\n # set config.memcache_servers only after olmemcache.Client is updated\n config.memcache_servers = config.upstream_memcache_servers\n\n\ndef _get_recent_changes():\n site = web.ctx.get('site') or delegate.create_site()\n web.ctx.setdefault(\"ip\", \"127.0.0.1\")\n\n # The recentchanges can have multiple revisions for a document if it has been modified more than once.\n # Take only the most recent revision in that case.\n visited = set()\n\n def is_visited(key):\n if key in visited:\n return True\n else:\n visited.add(key)\n return False\n\n # ignore reverts\n re_revert = web.re_compile(r\"reverted to revision \\d+\")\n\n def is_revert(r):\n return re_revert.match(r.comment or \"\")\n\n # take the 100 recent changes, filter them and take the first 50\n q = {\"bot\": False, \"limit\": 100}\n result = site.versions(q)\n result = [r for r in result if not is_visited(r.key) and not is_revert(r)]\n result = result[:50]\n\n def process_thing(thing):\n t = web.storage()\n for k in [\"key\", \"title\", \"name\", \"displayname\"]:\n t[k] = thing[k]\n t['type'] = web.storage(key=thing.type.key)\n return t\n\n for r in result:\n 
r.author = r.author and process_thing(r.author)\n r.thing = process_thing(site.get(r.key, r.revision))\n\n return result\n\n\ndef _get_recent_changes2():\n \"\"\"New recent changes for around the library.\n\n This function returns the message to display for each change.\n The message is get by calling `recentchanges/$kind/message.html` template.\n\n If `$var ignore=True` is set by the message template, the change is ignored.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n q = {\"bot\": False, \"limit\": 100}\n changes = web.ctx.site.recentchanges(q)\n\n def is_ignored(c):\n return (\n # c.kind=='update' allow us to ignore update recent changes on people\n c.kind == 'update'\n or\n # ignore change if author has been deleted (e.g. spammer)\n (c.author and c.author.type.key == '/type/delete')\n )\n\n def render(c):\n t = get_template(\"recentchanges/\" + c.kind + \"/message\") or get_template(\n \"recentchanges/default/message\"\n )\n return t(c)\n\n messages = [render(c) for c in changes if not is_ignored(c)]\n messages = [m for m in messages if str(m.get(\"ignore\", \"false\")).lower() != \"true\"]\n return messages\n\n\n_get_recent_changes = web.memoize(_get_recent_changes, expires=5 * 60, background=True)\n_get_recent_changes2 = web.memoize(\n _get_recent_changes2, expires=5 * 60, background=True\n)\n\n\n@public\ndef get_random_recent_changes(n):\n if \"recentchanges_v2\" in web.ctx.get(\"features\", []):\n changes = _get_recent_changes2()\n else:\n changes = _get_recent_changes()\n\n _changes = random.sample(changes, n) if len(changes) > n else changes\n for i, change in enumerate(_changes):\n _changes[i]['__body__'] = (\n _changes[i]['__body__'].replace('<script>', '').replace('</script>', '')\n )\n return _changes\n\n\ndef _get_blog_feeds():\n url = \"https://blog.openlibrary.org/feed/\"\n try:\n stats.begin(\"get_blog_feeds\", url=url)\n tree = etree.fromstring(requests.get(url).text)\n except Exception:\n # Handle error gracefully.\n logging.getLogger(\"openlibrary\").error(\n \"Failed to fetch blog feeds\", exc_info=True\n )\n return []\n finally:\n stats.end()\n\n def parse_item(item):\n pubdate = datetime.datetime.strptime(\n item.find(\"pubDate\").text, '%a, %d %b %Y %H:%M:%S +0000'\n ).isoformat()\n return dict(\n title=item.find(\"title\").text, link=item.find(\"link\").text, pubdate=pubdate\n )\n\n return [parse_item(item) for item in tree.findall(\".//item\")]\n\n\n_get_blog_feeds = cache.memcache_memoize(\n _get_blog_feeds, key_prefix=\"upstream.get_blog_feeds\", timeout=5 * 60\n)\n\n\ndef get_donation_include(include):\n web_input = web.input()\n\n # The following allows archive.org staff to test banners without\n # needing to reload openlibrary services:\n dev_host = web_input.pop(\"dev_host\", \"\") # e.g. 
`www-user`\n if dev_host and re.match('^[a-zA-Z0-9-.]+$', dev_host):\n script_src = \"https://%s.archive.org/includes/donate.js\" % dev_host\n else:\n script_src = \"/cdn/archive.org/donate.js\"\n\n if 'ymd' in web_input:\n script_src += '?ymd=' + web_input.ymd\n\n html = (\n \"\"\"\n <div id=\"donato\"></div>\n <script src=\"%s\" data-platform=\"ol\"></script>\n \"\"\"\n % script_src\n )\n return html\n\n\n# get_donation_include = cache.memcache_memoize(get_donation_include, key_prefix=\"upstream.get_donation_include\", timeout=60)\n\n\n@public\ndef item_image(image_path, default=None):\n if image_path is None:\n return default\n if image_path.startswith('https:'):\n return image_path\n return \"https:\" + image_path\n\n\n@public\ndef get_blog_feeds():\n def process(post):\n post = web.storage(post)\n post.pubdate = parse_datetime(post.pubdate)\n return post\n\n return [process(post) for post in _get_blog_feeds()]\n\n\nclass Request:\n path = property(lambda self: web.ctx.path)\n home = property(lambda self: web.ctx.home)\n domain = property(lambda self: web.ctx.host)\n\n @property\n def canonical_url(self):\n \"\"\"Returns the https:// version of the URL.\n\n Used for adding <meta rel=\"canonical\" ..> tag in all web pages.\n Required to make OL retain the page rank after https migration.\n \"\"\"\n readable_path = web.ctx.get('readable_path', web.ctx.path) or ''\n query = web.ctx.query or ''\n host = web.ctx.host or ''\n url = host + readable_path + query\n if url:\n url = \"https://\" + url\n parsed_url = urlparse(url)\n\n parsed_query = parse_qs(parsed_url.query)\n queries_to_exclude = ['sort', 'mode', 'v', 'type', 'debug']\n\n canonical_query = {\n q: v for q, v in parsed_query.items() if q not in queries_to_exclude\n }\n query = parse_urlencode(canonical_query, doseq=True)\n parsed_url = parsed_url._replace(query=query)\n\n url = urlunparse(parsed_url)\n\n return url\n return ''\n\n\n@public\ndef render_once(key):\n rendered = web.ctx.setdefault('render_once', {})\n if key in rendered:\n return False\n else:\n rendered[key] = True\n return True\n\n\n@public\ndef today():\n return datetime.datetime.today()\n\n\nclass HTMLTagRemover(HTMLParser):\n def __init__(self):\n super().__init__()\n self.data = []\n\n def handle_data(self, data):\n self.data.append(data.strip())\n\n def handle_endtag(self, tag):\n self.data.append('\\n' if tag in ('p', 'li') else ' ')\n\n\n@public\ndef reformat_html(html_str: str, max_length: Optional[int] = None) -> str:\n \"\"\"\n Reformats an HTML string, removing all opening and closing tags.\n Adds a line break element between each set of text content.\n Optionally truncates contents that exceeds the given max length.\n\n returns: A reformatted HTML string\n \"\"\"\n parser = HTMLTagRemover()\n # Must have a root node, otherwise the parser will fail\n parser.feed(f'<div>{html_str}</div>')\n content = [web.websafe(s) for s in parser.data if s]\n\n if max_length:\n return truncate(''.join(content), max_length).strip().replace('\\n', '<br>')\n else:\n return ''.join(content).strip().replace('\\n', '<br>')\n\n\ndef setup():\n \"\"\"Do required initialization\"\"\"\n # monkey-patch get_markdown to use OL Flavored Markdown\n view.get_markdown = get_markdown\n\n # Provide alternate implementations for websafe and commify\n web.websafe = websafe\n web.template.Template.FILTERS['.html'] = websafe\n web.template.Template.FILTERS['.xml'] = websafe\n\n web.commify = commify\n\n web.template.Template.globals.update(\n {\n 'HTML': HTML,\n 'request': Request(),\n 
'logger': logging.getLogger(\"openlibrary.template\"),\n 'sum': sum,\n 'get_donation_include': get_donation_include,\n 'websafe': web.websafe,\n }\n )\n\n from openlibrary.core import helpers as h\n\n web.template.Template.globals.update(h.helpers)\n\n if config.get('use_gzip') == True:\n config.middleware.append(GZipMiddleware)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n",
"path": "openlibrary/plugins/upstream/utils.py"
}
] | [
{
"content": "from typing import List, Union, Tuple, Any\n\nimport web\nimport json\nimport babel\nimport babel.core\nimport babel.dates\nfrom collections import defaultdict\nimport re\nimport random\nimport xml.etree.ElementTree as etree\nimport datetime\nimport logging\nfrom html.parser import HTMLParser\nfrom typing import Optional\n\nimport requests\n\nimport six\nfrom six.moves import urllib\nfrom six.moves.collections_abc import MutableMapping\nfrom six.moves.urllib.parse import (\n parse_qs,\n urlencode as parse_urlencode,\n urlparse,\n urlunparse,\n)\n\nfrom infogami import config\nfrom infogami.utils import view, delegate, stats\nfrom infogami.utils.view import render, get_template, public, query_param\nfrom infogami.utils.macro import macro\nfrom infogami.utils.context import context\nfrom infogami.infobase.client import Thing, Changeset, storify\n\nfrom openlibrary.core.helpers import commify, parse_datetime, truncate\nfrom openlibrary.core.middleware import GZipMiddleware\nfrom openlibrary.core import cache, ab\n\n\nclass MultiDict(MutableMapping):\n \"\"\"Ordered Dictionary that can store multiple values.\n\n >>> d = MultiDict()\n >>> d['x'] = 1\n >>> d['x'] = 2\n >>> d['y'] = 3\n >>> d['x']\n 2\n >>> d['y']\n 3\n >>> d['z']\n Traceback (most recent call last):\n ...\n KeyError: 'z'\n >>> list(d)\n ['x', 'x', 'y']\n >>> list(d.items())\n [('x', 1), ('x', 2), ('y', 3)]\n >>> list(d.multi_items())\n [('x', [1, 2]), ('y', [3])]\n \"\"\"\n\n def __init__(self, items=(), **kw):\n self._items = []\n\n for k, v in items:\n self[k] = v\n self.update(kw)\n\n def __getitem__(self, key):\n values = self.getall(key)\n if values:\n return values[-1]\n else:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n self._items.append((key, value))\n\n def __delitem__(self, key):\n self._items = [(k, v) for k, v in self._items if k != key]\n\n def __iter__(self):\n yield from self.keys()\n\n def __len__(self):\n return len(list(self.keys()))\n\n def getall(self, key):\n return [v for k, v in self._items if k == key]\n\n def keys(self):\n return [k for k, v in self._items]\n\n def values(self):\n return [v for k, v in self._items]\n\n def items(self):\n return self._items[:]\n\n def multi_items(self):\n \"\"\"Returns items as tuple of key and a list of values.\"\"\"\n items = []\n d = {}\n\n for k, v in self._items:\n if k not in d:\n d[k] = []\n items.append((k, d[k]))\n d[k].append(v)\n return items\n\n\n@macro\n@public\ndef render_template(name, *a, **kw):\n if \".\" in name:\n name = name.rsplit(\".\", 1)[0]\n return render[name](*a, **kw)\n\n\ndef kebab_case(upper_camel_case):\n \"\"\"\n :param str upper_camel_case: Text in upper camel case (e.g. \"HelloWorld\")\n :return: text in kebab case (e.g. 
'hello-world')\n\n >>> kebab_case('HelloWorld')\n 'hello-world'\n >>> kebab_case(\"MergeUI\")\n 'merge-u-i'\n \"\"\"\n parts = re.findall(r'[A-Z][^A-Z]*', upper_camel_case)\n return '-'.join(parts).lower()\n\n\n@public\ndef render_component(name, attrs=None, json_encode=True):\n \"\"\"\n :param str name: Name of the component (excluding extension)\n :param dict attrs: attributes to add to the component element\n \"\"\"\n from openlibrary.plugins.upstream.code import static_url\n\n attrs = attrs or {}\n attrs_str = ''\n for (key, val) in attrs.items():\n if json_encode and isinstance(val, dict) or isinstance(val, list):\n val = json.dumps(val)\n # On the Vue side use decodeURIComponent to decode\n val = urllib.parse.quote(val)\n attrs_str += f' {key}=\"{val}\"'\n html = ''\n included = web.ctx.setdefault(\"included-components\", [])\n\n if len(included) == 0:\n # Need to include Vue\n html += '<script src=\"%s\"></script>' % static_url('build/vue.js')\n\n if name not in included:\n url = static_url('build/components/production/ol-%s.min.js' % name)\n html += '<script src=\"%s\"></script>' % url\n included.append(name)\n\n html += '<ol-{name} {attrs}></ol-{name}>'.format(\n name=kebab_case(name),\n attrs=attrs_str,\n )\n return html\n\n\n@public\ndef get_error(name, *args):\n \"\"\"Return error with the given name from errors.tmpl template.\"\"\"\n return get_message_from_template(\"errors\", name, args)\n\n\n@public\ndef get_message(name, *args):\n \"\"\"Return message with given name from messages.tmpl template\"\"\"\n return get_message_from_template(\"messages\", name, args)\n\n\ndef get_message_from_template(template_name, name, args):\n d = render_template(template_name).get(\"messages\", {})\n msg = d.get(name) or name.lower().replace(\"_\", \" \")\n\n if msg and args:\n return msg % args\n else:\n return msg\n\n\n@public\ndef list_recent_pages(path, limit=100, offset=0):\n \"\"\"Lists all pages with name path/* in the order of last_modified.\"\"\"\n q = {}\n\n q['key~'] = path + '/*'\n # don't show /type/delete and /type/redirect\n q['a:type!='] = '/type/delete'\n q['b:type!='] = '/type/redirect'\n\n q['sort'] = 'key'\n q['limit'] = limit\n q['offset'] = offset\n q['sort'] = '-last_modified'\n # queries are very slow with != conditions\n # q['type'] != '/type/delete'\n return web.ctx.site.get_many(web.ctx.site.things(q))\n\n\n@public\ndef json_encode(d):\n return json.dumps(d)\n\n\ndef unflatten(d, separator=\"--\"):\n \"\"\"Convert flattened data into nested form.\n\n >>> unflatten({\"a\": 1, \"b--x\": 2, \"b--y\": 3, \"c--0\": 4, \"c--1\": 5})\n {'a': 1, 'c': [4, 5], 'b': {'y': 3, 'x': 2}}\n >>> unflatten({\"a--0--x\": 1, \"a--0--y\": 2, \"a--1--x\": 3, \"a--1--y\": 4})\n {'a': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}\n\n \"\"\"\n\n def isint(k):\n try:\n int(k)\n return True\n except ValueError:\n return False\n\n def setvalue(data, k, v):\n if '--' in k:\n k, k2 = k.split(separator, 1)\n setvalue(data.setdefault(k, {}), k2, v)\n else:\n data[k] = v\n\n def makelist(d):\n \"\"\"Convert d into a list if all the keys of d are integers.\"\"\"\n if isinstance(d, dict):\n if all(isint(k) for k in d):\n return [makelist(d[k]) for k in sorted(d, key=int)]\n else:\n return web.storage((k, makelist(v)) for k, v in d.items())\n else:\n return d\n\n d2 = {}\n for k, v in d.items():\n setvalue(d2, k, v)\n return makelist(d2)\n\n\ndef fuzzy_find(value, options, stopwords=None):\n stopwords = stopwords or []\n \"\"\"Try find the option nearest to the value.\n\n >>> fuzzy_find(\"O'Reilly\", 
[\"O'Reilly Inc\", \"Addison-Wesley\"])\n \"O'Reilly Inc\"\n \"\"\"\n if not options:\n return value\n\n rx = web.re_compile(r\"[-_\\.&, ]+\")\n\n # build word frequency\n d = defaultdict(list)\n for option in options:\n for t in rx.split(option):\n d[t].append(option)\n\n # find score for each option\n score = defaultdict(lambda: 0)\n for t in rx.split(value):\n if t.lower() in stopwords:\n continue\n for option in d[t]:\n score[option] += 1\n\n # take the option with maximum score\n return max(options, key=score.__getitem__)\n\n\n@public\ndef radio_input(checked=False, **params):\n params['type'] = 'radio'\n if checked:\n params['checked'] = \"checked\"\n return \"<input %s />\" % \" \".join(\n [f'{k}=\"{web.websafe(v)}\"' for k, v in params.items()]\n )\n\n\n@public\ndef radio_list(name, args, value):\n html = []\n for arg in args:\n if isinstance(arg, tuple):\n arg, label = arg\n else:\n label = arg\n html.append(radio_input())\n\n\ndef get_coverstore_url() -> str:\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n@public\ndef get_coverstore_public_url() -> str:\n return config.get('coverstore_public_url', get_coverstore_url()).rstrip('/')\n\n\ndef _get_changes_v1_raw(query, revision=None):\n \"\"\"Returns the raw versions response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n versions = web.ctx.site.versions(query)\n\n for v in versions:\n v.created = v.created.isoformat()\n v.author = v.author and v.author.key\n\n # XXX-Anand: hack to avoid too big data to be stored in memcache.\n # v.changes is not used and it contrinutes to memcache bloat in a big way.\n v.changes = '[]'\n\n return versions\n\n\ndef get_changes_v1(query, revision=None):\n # uses the cached function _get_changes_v1_raw to get the raw data\n # and processes to before returning.\n def process(v):\n v = web.storage(v)\n v.created = parse_datetime(v.created)\n v.author = v.author and web.ctx.site.get(v.author, lazy=True)\n return v\n\n return [process(v) for v in _get_changes_v1_raw(query, revision)]\n\n\ndef _get_changes_v2_raw(query, revision=None):\n \"\"\"Returns the raw recentchanges response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n changes = web.ctx.site.recentchanges(query)\n return [c.dict() for c in changes]\n\n\n# XXX-Anand: disabled temporarily to avoid too much memcache usage.\n# _get_changes_v2_raw = cache.memcache_memoize(_get_changes_v2_raw, key_prefix=\"upstream._get_changes_v2_raw\", timeout=10*60)\n\n\ndef get_changes_v2(query, revision=None):\n page = web.ctx.site.get(query['key'])\n\n def first(seq, default=None):\n try:\n return next(seq)\n except StopIteration:\n return default\n\n def process_change(change):\n change = Changeset.create(web.ctx.site, storify(change))\n change.thing = page\n change.key = page.key\n change.revision = first(c.revision for c in change.changes if c.key == page.key)\n change.created = change.timestamp\n\n change.get = change.__dict__.get\n change.get_comment = lambda: get_comment(change)\n change.machine_comment = change.data.get(\"machine_comment\")\n\n return change\n\n def get_comment(change):\n t = get_template(\"recentchanges/\" + change.kind + \"/comment\") or get_template(\n \"recentchanges/default/comment\"\n )\n return t(change, page)\n\n query['key'] = 
page.key\n changes = _get_changes_v2_raw(query, revision=page.revision)\n return [process_change(c) for c in changes]\n\n\ndef get_changes(query, revision=None):\n return get_changes_v2(query, revision=revision)\n\n\n@public\ndef get_history(page):\n h = web.storage(\n revision=page.revision, lastest_revision=page.revision, created=page.created\n )\n if h.revision < 5:\n h.recent = get_changes({\"key\": page.key, \"limit\": 5}, revision=page.revision)\n h.initial = h.recent[-1:]\n h.recent = h.recent[:-1]\n else:\n h.initial = get_changes(\n {\"key\": page.key, \"limit\": 1, \"offset\": h.revision - 1},\n revision=page.revision,\n )\n h.recent = get_changes({\"key\": page.key, \"limit\": 4}, revision=page.revision)\n\n return h\n\n\n@public\ndef get_version(key, revision):\n try:\n return web.ctx.site.versions({\"key\": key, \"revision\": revision, \"limit\": 1})[0]\n except IndexError:\n return None\n\n\n@public\ndef get_recent_author(doc):\n versions = get_changes_v1(\n {'key': doc.key, 'limit': 1, \"offset\": 0}, revision=doc.revision\n )\n if versions:\n return versions[0].author\n\n\n@public\ndef get_recent_accounts(limit=5, offset=0):\n versions = web.ctx.site.versions(\n {'type': '/type/user', 'revision': 1, 'limit': limit, 'offset': offset}\n )\n return web.ctx.site.get_many([v.key for v in versions])\n\n\ndef get_locale():\n try:\n return babel.Locale(web.ctx.get(\"lang\") or \"en\")\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\n@public\ndef process_version(v):\n \"\"\"Looks at the version and adds machine_comment required for showing \"View MARC\" link.\"\"\"\n comments = [\n \"found a matching marc record\",\n \"add publisher and source\",\n ]\n if v.key.startswith('/books/') and not v.get('machine_comment'):\n thing = v.get('thing') or web.ctx.site.get(v.key, v.revision)\n if (\n thing.source_records\n and v.revision == 1\n or (v.comment and v.comment.lower() in comments)\n ):\n marc = thing.source_records[-1]\n if marc.startswith('marc:'):\n v.machine_comment = marc[len(\"marc:\") :]\n else:\n v.machine_comment = marc\n return v\n\n\n@public\ndef is_thing(t):\n return isinstance(t, Thing)\n\n\n@public\ndef putctx(key, value):\n \"\"\"Save a value in the context.\"\"\"\n context[key] = value\n return \"\"\n\n\nclass Metatag:\n def __init__(self, tag=\"meta\", **attrs):\n self.tag = tag\n self.attrs = attrs\n\n def __str__(self):\n attrs = ' '.join(f'{k}=\"{websafe(v)}\"' for k, v in self.attrs.items())\n return f'<{self.tag} {attrs} />'\n\n def __repr__(self):\n return 'Metatag(%s)' % str(self)\n\n\n@public\ndef add_metatag(tag=\"meta\", **attrs):\n context.setdefault('metatags', [])\n context.metatags.append(Metatag(tag, **attrs))\n\n\n@public\ndef url_quote(text):\n if isinstance(text, str):\n text = text.encode('utf8')\n return urllib.parse.quote_plus(text)\n\n\n@public\ndef urlencode(dict_or_list_of_tuples: Union[dict, list[tuple[str, Any]]]) -> str:\n \"\"\"\n You probably want to use this, if you're looking to urlencode parameters. 
This will\n encode things to utf8 that would otherwise cause urlencode to error.\n \"\"\"\n from six.moves.urllib.parse import urlencode as og_urlencode\n\n tuples = dict_or_list_of_tuples\n if isinstance(dict_or_list_of_tuples, dict):\n tuples = dict_or_list_of_tuples.items()\n params = [(k, v.encode('utf-8') if isinstance(v, str) else v) for (k, v) in tuples]\n return og_urlencode(params)\n\n\n@public\ndef entity_decode(text):\n try:\n return six.moves.html_parser.unescape(text)\n except AttributeError:\n return six.moves.html_parser.HTMLParser().unescape(text)\n\n\n@public\ndef set_share_links(url='#', title='', view_context=None):\n \"\"\"\n Constructs list share links for social platforms and assigns to view context attribute\n\n Args (all required):\n url (str or unicode) - complete canonical url to page being shared\n title (str or unicode) - title of page being shared\n view_context (object that has/can-have share_links attribute)\n \"\"\"\n encoded_url = url_quote(url)\n text = url_quote(\"Check this out: \" + entity_decode(title))\n links = [\n {\n 'text': 'Facebook',\n 'url': 'https://www.facebook.com/sharer/sharer.php?u=' + encoded_url,\n },\n {\n 'text': 'Twitter',\n 'url': f'https://twitter.com/intent/tweet?url={encoded_url}&via=openlibrary&text={text}',\n },\n {\n 'text': 'Pinterest',\n 'url': f'https://pinterest.com/pin/create/link/?url={encoded_url}&description={text}',\n },\n ]\n view_context.share_links = links\n\n\ndef pad(seq, size, e=None):\n \"\"\"\n >>> pad([1, 2], 4, 0)\n [1, 2, 0, 0]\n \"\"\"\n seq = seq[:]\n while len(seq) < size:\n seq.append(e)\n return seq\n\n\ndef parse_toc_row(line):\n \"\"\"Parse one row of table of contents.\n\n >>> def f(text):\n ... d = parse_toc_row(text)\n ... return (d['level'], d['label'], d['title'], d['pagenum'])\n ...\n >>> f(\"* chapter 1 | Welcome to the real world! | 2\")\n (1, 'chapter 1', 'Welcome to the real world!', '2')\n >>> f(\"Welcome to the real world!\")\n (0, '', 'Welcome to the real world!', '')\n >>> f(\"** | Welcome to the real world! 
| 2\")\n (2, '', 'Welcome to the real world!', '2')\n >>> f(\"|Preface | 1\")\n (0, '', 'Preface', '1')\n >>> f(\"1.1 | Apple\")\n (0, '1.1', 'Apple', '')\n \"\"\"\n RE_LEVEL = web.re_compile(r\"(\\**)(.*)\")\n level, text = RE_LEVEL.match(line.strip()).groups()\n\n if \"|\" in text:\n tokens = text.split(\"|\", 2)\n label, title, page = pad(tokens, 3, '')\n else:\n title = text\n label = page = \"\"\n\n return web.storage(\n level=len(level), label=label.strip(), title=title.strip(), pagenum=page.strip()\n )\n\n\ndef parse_toc(text):\n \"\"\"Parses each line of toc\"\"\"\n if text is None:\n return []\n return [parse_toc_row(line) for line in text.splitlines() if line.strip(\" |\")]\n\n\n_languages = None\n\n\n@public\ndef get_languages():\n global _languages\n if _languages is None:\n keys = web.ctx.site.things(\n {\"type\": \"/type/language\", \"key~\": \"/languages/*\", \"limit\": 1000}\n )\n _languages = sorted(\n (\n web.storage(name=d.name, code=d.code, key=d.key)\n for d in web.ctx.site.get_many(keys)\n ),\n key=lambda d: d.name.lower(),\n )\n return _languages\n\n\n@public\ndef get_author_config():\n return _get_author_config()\n\n\[email protected]\ndef _get_author_config():\n \"\"\"Returns the author config.\n\n The results are cached on the first invocation.\n Any changes to /config/author page require restarting the app.\n\n \"\"\"\n thing = web.ctx.site.get('/config/author')\n if hasattr(thing, \"identifiers\"):\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n else:\n identifiers = {}\n return web.storage(identifiers=identifiers)\n\n\n@public\ndef get_edition_config():\n return _get_edition_config()\n\n\[email protected]\ndef _get_edition_config():\n \"\"\"Returns the edition config.\n\n The results are cached on the first invocation. 
Any changes to /config/edition page require restarting the app.\n\n This is is cached because fetching and creating the Thing object was taking about 20ms of time for each book request.\n \"\"\"\n thing = web.ctx.site.get('/config/edition')\n classifications = [\n web.storage(t.dict()) for t in thing.classifications if 'name' in t\n ]\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n roles = thing.roles\n return web.storage(\n classifications=classifications, identifiers=identifiers, roles=roles\n )\n\n\nfrom openlibrary.core.olmarkdown import OLMarkdown\n\n\ndef get_markdown(text, safe_mode=False):\n md = OLMarkdown(source=text, safe_mode=safe_mode)\n view._register_mdx_extensions(md)\n md.postprocessors += view.wiki_processors\n return md\n\n\nclass HTML(str):\n def __init__(self, html):\n str.__init__(self, web.safeunicode(html))\n\n def __repr__(self):\n return \"<html: %s>\" % str.__repr__(self)\n\n\n_websafe = web.websafe\n\n\ndef websafe(text):\n if isinstance(text, HTML):\n return text\n elif isinstance(text, web.template.TemplateResult):\n return web.safestr(text)\n else:\n return _websafe(text)\n\n\nfrom openlibrary.plugins.upstream import adapter\nfrom openlibrary.utils.olcompress import OLCompressor\nfrom openlibrary.utils import olmemcache\nimport memcache\n\n\nclass UpstreamMemcacheClient:\n \"\"\"Wrapper to memcache Client to handle upstream specific conversion and OL specific compression.\n Compatible with memcache Client API.\n \"\"\"\n\n def __init__(self, servers):\n self._client = memcache.Client(servers)\n compressor = OLCompressor()\n self.compress = compressor.compress\n\n def decompress(*args, **kw):\n d = json.loads(compressor.decompress(*args, **kw))\n return json.dumps(adapter.unconvert_dict(d))\n\n self.decompress = decompress\n\n def get(self, key):\n key = adapter.convert_key(key)\n if key is None:\n return None\n\n try:\n value = self._client.get(web.safestr(key))\n except memcache.Client.MemcachedKeyError:\n return None\n\n return value and self.decompress(value)\n\n def get_multi(self, keys):\n keys = [adapter.convert_key(k) for k in keys]\n keys = [web.safestr(k) for k in keys]\n\n d = self._client.get_multi(keys)\n return {\n web.safeunicode(adapter.unconvert_key(k)): self.decompress(v)\n for k, v in d.items()\n }\n\n\nif config.get('upstream_memcache_servers'):\n olmemcache.Client = UpstreamMemcacheClient\n # set config.memcache_servers only after olmemcache.Client is updated\n config.memcache_servers = config.upstream_memcache_servers\n\n\ndef _get_recent_changes():\n site = web.ctx.get('site') or delegate.create_site()\n web.ctx.setdefault(\"ip\", \"127.0.0.1\")\n\n # The recentchanges can have multiple revisions for a document if it has been modified more than once.\n # Take only the most recent revision in that case.\n visited = set()\n\n def is_visited(key):\n if key in visited:\n return True\n else:\n visited.add(key)\n return False\n\n # ignore reverts\n re_revert = web.re_compile(r\"reverted to revision \\d+\")\n\n def is_revert(r):\n return re_revert.match(r.comment or \"\")\n\n # take the 100 recent changes, filter them and take the first 50\n q = {\"bot\": False, \"limit\": 100}\n result = site.versions(q)\n result = [r for r in result if not is_visited(r.key) and not is_revert(r)]\n result = result[:50]\n\n def process_thing(thing):\n t = web.storage()\n for k in [\"key\", \"title\", \"name\", \"displayname\"]:\n t[k] = thing[k]\n t['type'] = web.storage(key=thing.type.key)\n return t\n\n for r in result:\n 
r.author = r.author and process_thing(r.author)\n r.thing = process_thing(site.get(r.key, r.revision))\n\n return result\n\n\ndef _get_recent_changes2():\n \"\"\"New recent changes for around the library.\n\n This function returns the message to display for each change.\n The message is get by calling `recentchanges/$kind/message.html` template.\n\n If `$var ignore=True` is set by the message template, the change is ignored.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n q = {\"bot\": False, \"limit\": 100}\n changes = web.ctx.site.recentchanges(q)\n\n def is_ignored(c):\n return (\n # c.kind=='update' allow us to ignore update recent changes on people\n c.kind == 'update'\n or\n # ignore change if author has been deleted (e.g. spammer)\n (c.author and c.author.type.key == '/type/delete')\n )\n\n def render(c):\n t = get_template(\"recentchanges/\" + c.kind + \"/message\") or get_template(\n \"recentchanges/default/message\"\n )\n return t(c)\n\n messages = [render(c) for c in changes if not is_ignored(c)]\n messages = [m for m in messages if str(m.get(\"ignore\", \"false\")).lower() != \"true\"]\n return messages\n\n\n_get_recent_changes = web.memoize(_get_recent_changes, expires=5 * 60, background=True)\n_get_recent_changes2 = web.memoize(\n _get_recent_changes2, expires=5 * 60, background=True\n)\n\n\n@public\ndef get_random_recent_changes(n):\n if \"recentchanges_v2\" in web.ctx.get(\"features\", []):\n changes = _get_recent_changes2()\n else:\n changes = _get_recent_changes()\n\n _changes = random.sample(changes, n) if len(changes) > n else changes\n for i, change in enumerate(_changes):\n _changes[i]['__body__'] = (\n _changes[i]['__body__'].replace('<script>', '').replace('</script>', '')\n )\n return _changes\n\n\ndef _get_blog_feeds():\n url = \"https://blog.openlibrary.org/feed/\"\n try:\n stats.begin(\"get_blog_feeds\", url=url)\n tree = etree.fromstring(requests.get(url).text)\n except Exception:\n # Handle error gracefully.\n logging.getLogger(\"openlibrary\").error(\n \"Failed to fetch blog feeds\", exc_info=True\n )\n return []\n finally:\n stats.end()\n\n def parse_item(item):\n pubdate = datetime.datetime.strptime(\n item.find(\"pubDate\").text, '%a, %d %b %Y %H:%M:%S +0000'\n ).isoformat()\n return dict(\n title=item.find(\"title\").text, link=item.find(\"link\").text, pubdate=pubdate\n )\n\n return [parse_item(item) for item in tree.findall(\".//item\")]\n\n\n_get_blog_feeds = cache.memcache_memoize(\n _get_blog_feeds, key_prefix=\"upstream.get_blog_feeds\", timeout=5 * 60\n)\n\n\ndef get_donation_include(include):\n web_input = web.input()\n\n # The following allows archive.org staff to test banners without\n # needing to reload openlibrary services:\n dev_host = web_input.pop(\"dev_host\", \"\") # e.g. 
`www-user`\n if dev_host and re.match('^[a-zA-Z0-9-.]+$', dev_host):\n script_src = \"https://%s.archive.org/includes/donate.js\" % dev_host\n else:\n script_src = \"/cdn/archive.org/donate.js\"\n\n if 'ymd' in web_input:\n script_src += '?ymd=' + web_input.ymd\n\n html = (\n \"\"\"\n <div id=\"donato\"></div>\n <script src=\"%s\" data-platform=\"ol\"></script>\n \"\"\"\n % script_src\n )\n return html\n\n\n# get_donation_include = cache.memcache_memoize(get_donation_include, key_prefix=\"upstream.get_donation_include\", timeout=60)\n\n\n@public\ndef item_image(image_path, default=None):\n if image_path is None:\n return default\n if image_path.startswith('https:'):\n return image_path\n return \"https:\" + image_path\n\n\n@public\ndef get_blog_feeds():\n def process(post):\n post = web.storage(post)\n post.pubdate = parse_datetime(post.pubdate)\n return post\n\n return [process(post) for post in _get_blog_feeds()]\n\n\nclass Request:\n path = property(lambda self: web.ctx.path)\n home = property(lambda self: web.ctx.home)\n domain = property(lambda self: web.ctx.host)\n fullpath = property(lambda self: web.ctx.fullpath)\n\n @property\n def canonical_url(self):\n \"\"\"Returns the https:// version of the URL.\n\n Used for adding <meta rel=\"canonical\" ..> tag in all web pages.\n Required to make OL retain the page rank after https migration.\n \"\"\"\n readable_path = web.ctx.get('readable_path', web.ctx.path) or ''\n query = web.ctx.query or ''\n host = web.ctx.host or ''\n url = host + readable_path + query\n if url:\n url = \"https://\" + url\n parsed_url = urlparse(url)\n\n parsed_query = parse_qs(parsed_url.query)\n queries_to_exclude = ['sort', 'mode', 'v', 'type', 'debug']\n\n canonical_query = {\n q: v for q, v in parsed_query.items() if q not in queries_to_exclude\n }\n query = parse_urlencode(canonical_query, doseq=True)\n parsed_url = parsed_url._replace(query=query)\n\n url = urlunparse(parsed_url)\n\n return url\n return ''\n\n\n@public\ndef render_once(key):\n rendered = web.ctx.setdefault('render_once', {})\n if key in rendered:\n return False\n else:\n rendered[key] = True\n return True\n\n\n@public\ndef today():\n return datetime.datetime.today()\n\n\nclass HTMLTagRemover(HTMLParser):\n def __init__(self):\n super().__init__()\n self.data = []\n\n def handle_data(self, data):\n self.data.append(data.strip())\n\n def handle_endtag(self, tag):\n self.data.append('\\n' if tag in ('p', 'li') else ' ')\n\n\n@public\ndef reformat_html(html_str: str, max_length: Optional[int] = None) -> str:\n \"\"\"\n Reformats an HTML string, removing all opening and closing tags.\n Adds a line break element between each set of text content.\n Optionally truncates contents that exceeds the given max length.\n\n returns: A reformatted HTML string\n \"\"\"\n parser = HTMLTagRemover()\n # Must have a root node, otherwise the parser will fail\n parser.feed(f'<div>{html_str}</div>')\n content = [web.websafe(s) for s in parser.data if s]\n\n if max_length:\n return truncate(''.join(content), max_length).strip().replace('\\n', '<br>')\n else:\n return ''.join(content).strip().replace('\\n', '<br>')\n\n\ndef setup():\n \"\"\"Do required initialization\"\"\"\n # monkey-patch get_markdown to use OL Flavored Markdown\n view.get_markdown = get_markdown\n\n # Provide alternate implementations for websafe and commify\n web.websafe = websafe\n web.template.Template.FILTERS['.html'] = websafe\n web.template.Template.FILTERS['.xml'] = websafe\n\n web.commify = commify\n\n 
web.template.Template.globals.update(\n {\n 'HTML': HTML,\n 'request': Request(),\n 'logger': logging.getLogger(\"openlibrary.template\"),\n 'sum': sum,\n 'get_donation_include': get_donation_include,\n 'websafe': web.websafe,\n }\n )\n\n from openlibrary.core import helpers as h\n\n web.template.Template.globals.update(h.helpers)\n\n if config.get('use_gzip') == True:\n config.middleware.append(GZipMiddleware)\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n",
"path": "openlibrary/plugins/upstream/utils.py"
}
] | diff --git a/openlibrary/plugins/upstream/utils.py b/openlibrary/plugins/upstream/utils.py
index 881804c6631..9ec1e654dc5 100644
--- a/openlibrary/plugins/upstream/utils.py
+++ b/openlibrary/plugins/upstream/utils.py
@@ -937,6 +937,7 @@ class Request:
path = property(lambda self: web.ctx.path)
home = property(lambda self: web.ctx.home)
domain = property(lambda self: web.ctx.host)
+ fullpath = property(lambda self: web.ctx.fullpath)
@property
def canonical_url(self):
diff --git a/openlibrary/templates/lib/nav_foot.html b/openlibrary/templates/lib/nav_foot.html
index c4b5b268f77..0b8c90296fc 100644
--- a/openlibrary/templates/lib/nav_foot.html
+++ b/openlibrary/templates/lib/nav_foot.html
@@ -41,7 +41,7 @@ <h2>$:_('Develop')</h2>
<h2>$:_('Help')</h2>
<ul>
<li><a href="/help">$_('Help Center')</a></li>
- <li><a href="/contact?path=$request.path" title="$_('Problems')">$_('Report A Problem')</a></li>
+ <li><a href="/contact?$:urlencode(dict(path=request.fullpath))" title="$_('Problems')">$_('Report A Problem')</a></li>
<li><a href="/help/faq/editing" title="$_('Suggest Edits')">$_('Suggesting Edits')</a></li>
</ul>
<aside id="footer-icons">
|
ivy-llc__ivy-13216 | iscomplexobj
It was mentioned here: #11223, but it's been open for almost a month now 😅
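For orientation, `np.iscomplexobj`/`jnp.iscomplexobj` is a dtype check: it returns `True` when the dtype of its argument is a complex type. A minimal sketch of such a frontend wrapper, reusing the `ivy.is_complex_dtype` helper that the existing `isrealobj` function in this file already calls, is shown below; it is an illustrative sketch only, not the implementation that was merged (see the diff further down).

```python
# Illustrative sketch only (not the merged PR): mirror numpy's dtype-based
# iscomplexobj by reusing ivy.is_complex_dtype, as isrealobj already does.
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back


@to_ivy_arrays_and_back
def iscomplexobj(x):
    # True when the array's dtype is complex (e.g. complex64/complex128).
    return ivy.is_complex_dtype(ivy.dtype(x))
```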
| [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.functional.frontends.jax.numpy import (\n promote_types_of_jax_inputs as promote_jax_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef array_equal(a1, a2, equal_nan: bool) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n if ivy.shape(a1) != ivy.shape(a2):\n return False\n eq = ivy.asarray(a1 == a2)\n if equal_nan:\n eq = ivy.logical_or(eq, ivy.logical_and(ivy.isnan(a1), ivy.isnan(a2)))\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef array_equiv(a1, a2) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n try:\n eq = ivy.equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef isneginf(x, out=None):\n return ivy.isneginf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef isposinf(x, out=None):\n return ivy.isposinf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.not_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isnan(x, out=None):\n return ivy.isnan(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef all(a, axis=None, out=None, keepdims=False, *, where=False):\n return ivy.all(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef any(a, axis=None, out=None, keepdims=False, *, where=None):\n # TODO: Out not supported\n ret = ivy.any(a, axis=axis, keepdims=keepdims)\n if ivy.is_array(where):\n where = ivy.array(where, dtype=ivy.bool)\n ret = ivy.where(where, ret, ivy.default(None, ivy.zeros_like(ret)))\n return ret\n\n\nalltrue = all\n\n\nsometrue = any\n\n\n@to_ivy_arrays_and_back\n# known issue in jnp's documentation of arguments\n# https://github.com/google/jax/issues/9119\ndef logical_and(x1, x2, /):\n if x1.dtype == \"complex128\" or x2.dtype == \"complex128\":\n x1 = ivy.astype(x1, ivy.complex128)\n x2 = ivy.astype(x2, ivy.complex128)\n else:\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef invert(x, /):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef isfinite(x, /):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef isinf(x, /):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef isclose(a, b, 
rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, /):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isscalar(x, /):\n return ivy.isscalar(x)\n\n\n@to_ivy_arrays_and_back\ndef left_shift(x1, x2):\n return ivy.isscalar(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isreal(x, out=None):\n return ivy.isreal(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef right_shift(x1, x2, /):\n return ivy.bitwise_right_shift(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isrealobj(x: any):\n return not ivy.is_complex_dtype(ivy.dtype(x))\n\n\n@to_ivy_arrays_and_back\ndef iscomplex(x: any):\n return ivy.bitwise_invert(ivy.isreal(x))\n",
"path": "ivy/functional/frontends/jax/numpy/logic.py"
}
] | [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.functional.frontends.jax.numpy import (\n promote_types_of_jax_inputs as promote_jax_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef array_equal(a1, a2, equal_nan: bool) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n if ivy.shape(a1) != ivy.shape(a2):\n return False\n eq = ivy.asarray(a1 == a2)\n if equal_nan:\n eq = ivy.logical_or(eq, ivy.logical_and(ivy.isnan(a1), ivy.isnan(a2)))\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef array_equiv(a1, a2) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n try:\n eq = ivy.equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef isneginf(x, out=None):\n return ivy.isneginf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef isposinf(x, out=None):\n return ivy.isposinf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.not_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isnan(x, out=None):\n return ivy.isnan(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef all(a, axis=None, out=None, keepdims=False, *, where=False):\n return ivy.all(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef any(a, axis=None, out=None, keepdims=False, *, where=None):\n # TODO: Out not supported\n ret = ivy.any(a, axis=axis, keepdims=keepdims)\n if ivy.is_array(where):\n where = ivy.array(where, dtype=ivy.bool)\n ret = ivy.where(where, ret, ivy.default(None, ivy.zeros_like(ret)))\n return ret\n\n\nalltrue = all\n\n\nsometrue = any\n\n\n@to_ivy_arrays_and_back\n# known issue in jnp's documentation of arguments\n# https://github.com/google/jax/issues/9119\ndef logical_and(x1, x2, /):\n if x1.dtype == \"complex128\" or x2.dtype == \"complex128\":\n x1 = ivy.astype(x1, ivy.complex128)\n x2 = ivy.astype(x2, ivy.complex128)\n else:\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef invert(x, /):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef isfinite(x, /):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef isinf(x, /):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef isclose(a, b, 
rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, /):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isscalar(x, /):\n return ivy.isscalar(x)\n\n\n@to_ivy_arrays_and_back\ndef left_shift(x1, x2):\n return ivy.isscalar(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isreal(x, out=None):\n return ivy.isreal(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef right_shift(x1, x2, /):\n return ivy.bitwise_right_shift(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isrealobj(x: any):\n return not ivy.is_complex_dtype(ivy.dtype(x))\n\n\n@to_ivy_arrays_and_back\ndef iscomplex(x: any):\n return ivy.bitwise_invert(ivy.isreal(x))\n\n\n@to_ivy_arrays_and_back\ndef iscomplexobj(x):\n if x.ndim == 0:\n return ivy.is_complex_dtype(ivy.dtype(x))\n for ele in x:\n if ivy.is_complex_dtype(ivy.dtype(ele)):\n return True\n else:\n return False",
"path": "ivy/functional/frontends/jax/numpy/logic.py"
}
] | diff --git a/ivy/functional/frontends/jax/numpy/logic.py b/ivy/functional/frontends/jax/numpy/logic.py
index ff1b7db94a9f7..400f8c5b669fe 100644
--- a/ivy/functional/frontends/jax/numpy/logic.py
+++ b/ivy/functional/frontends/jax/numpy/logic.py
@@ -209,3 +209,14 @@ def isrealobj(x: any):
@to_ivy_arrays_and_back
def iscomplex(x: any):
return ivy.bitwise_invert(ivy.isreal(x))
+
+
+@to_ivy_arrays_and_back
+def iscomplexobj(x):
+ if x.ndim == 0:
+ return ivy.is_complex_dtype(ivy.dtype(x))
+ for ele in x:
+ if ivy.is_complex_dtype(ivy.dtype(ele)):
+ return True
+ else:
+ return False
\ No newline at end of file
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
index e7d73071a572d..30e4fef02f0bc 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
@@ -938,3 +938,30 @@ def test_jax_numpy_isrealobj(
on_device=on_device,
x=x[0],
)
+
+
+# iscomplexobj
+@handle_frontend_test(
+ fn_tree="jax.numpy.iscomplexobj",
+ dtype_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("valid"),
+ ),
+ test_with_out=st.just(False),
+)
+def test_jax_numpy_iscomplexobj(
+ dtype_and_x,
+ frontend,
+ on_device,
+ *,
+ fn_tree,
+ test_flags,
+):
+ input_dtype, x = dtype_and_x
+ helpers.test_frontend_function(
+ input_dtypes=input_dtype,
+ frontend=frontend,
+ test_flags=test_flags,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ x=x[0],
+ )
|
mirumee__ariadne-799 | Support Starlette 0.18.0
Starlette 0.18.0 was just released: https://github.com/encode/starlette/releases/tag/0.18.0
and the dependency is currently pinned at `<0.18.0`.
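For context, the pin lives in `install_requires` inside `setup.py` (shown in the files below). The snippet here is illustrative only and not part of the ariadne codebase; it uses the `packaging` library to show why the current `<0.18` specifier excludes the 0.18.0 release while a relaxed `<0.19` pin would accept it.

```python
# Illustrative only (not part of ariadne): compare the old and relaxed pins.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

release = Version("0.18.0")
print(release in SpecifierSet("<0.18"))  # False -> excluded by the current pin
print(release in SpecifierSet("<0.19"))  # True  -> allowed by a relaxed pin
```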
| [
{
"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.15.0.dev3\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette<0.18\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.15.0.dev3\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette<0.19\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6e031eacd..d7a27d961 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
## 0.15.0 (unreleased)
- Updated `graphql-core` requirement to 3.2.0.
+- Bumped `starlette` support to 0.18.
- Drop Python 3.6 support.
- Added basic support for `OPTIONS` HTTP request.
- Refactor `ariadne.asgi.GraphQL` to make it easier to customize JSON response.
diff --git a/requirements.txt b/requirements.txt
index 7581b1ddd..353cc7b5e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ idna==3.3
# via anyio
sniffio==1.2.0
# via anyio
-starlette==0.17.1
+starlette==0.18.0
# via ariadne (setup.py)
typing-extensions==4.1.1
# via ariadne (setup.py)
diff --git a/setup.py b/setup.py
index a2f3f8492..7c250ae56 100755
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette<0.18",
+ "starlette<0.19",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
|
jupyterhub__jupyterhub-2510 | Deleting and recreating a named server results in lost name in GUI
## Description
I've been working on a POC for my place of work to examine the feasibility of using JupyterHub to serve Jupyter Notebook/Lab servers with custom images containing a Python SDK we're working on.
Recently, I've been working on testing out named servers. In that process, I've discovered that if you delete a named server from the browser GUI, then recreate it (in any fashion, whether by the REST API or through the GUI), that server will no longer appear in the list.
## To reproduce
1. Create a named server:

2. Delete it:

3. Create it again: `curl -X POST -H "Authorization: token a_very_secret_token" "http://my.host.domain/hub/api/users/pmende/servers/serverA"`
Now the user's Hub Control Panel/Home still does not list the server (i.e., it is identical to the image after step 2, above), but there is definitely a running pod with the server name:
```
$ kubectl get pods -n jhub
NAME READY STATUS RESTARTS AGE
hub-949c864ff-v7dx2 1/1 Running 0 18m
jupyter-pmende-2dserver-41 1/1 Running 0 3m44s
proxy-c88fd6f59-s8k82 1/1 Running 0 18m
```
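
The three steps above can also be scripted against the Hub REST API. The snippet below is an illustrative sketch, not part of JupyterHub: the host, token, username and server name are the placeholders from this report, and the delete request sends `{"remove": true}`, which asks the Hub to drop the server record rather than just stop it.

```python
# Illustrative reproduction sketch (placeholder host/token/user from above).
import requests

API = "http://my.host.domain/hub/api"
HEADERS = {"Authorization": "token a_very_secret_token"}
USER, SERVER = "pmende", "serverA"

# 1. create the named server
requests.post(f"{API}/users/{USER}/servers/{SERVER}", headers=HEADERS)

# 2. delete it, asking the Hub to also remove the server record
requests.delete(
    f"{API}/users/{USER}/servers/{SERVER}",
    headers=HEADERS,
    json={"remove": True},
)

# 3. recreate it; the pod starts, but Home no longer lists the server
requests.post(f"{API}/users/{USER}/servers/{SERVER}", headers=HEADERS)
```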
## Hub creation
`helm upgrade --install jhub jupyterhub/jupyterhub --namespace jhub --version=0.9-8ed2f81 --values config.yaml`
## Contents of `config.yaml`
```
#########################
# Networking Config #
#########################
proxy:
secretToken: "mysupersecrettoken"
service:
type: NodePort
nodePorts:
http: 31212
chp:
resources:
requests:
memory: 0
cpu: 0
ingress:
enabled: true
hosts:
- my.host.domain
rules:
http:
- paths: /hub/api
backend:
serviceName: hub
servicePort: 8081
#########################
# Hardware/Image Config #
#########################
singleuser:
image:
name: jupyter/scipy-notebook
tag: 59b402ce701d
cpu:
guarantee: 0.25
limit: 0.5
memory:
guarantee: "256M"
limit: "320M"
profileList:
- display_name: "Default"
description: "0.25 CPU; 256M Ram"
default: True
- display_name: "BIG"
description: "0.5 Whole CPUs, 512M Ram"
kubespawner_override:
cpu_guarantee: 0.5
cpu_limit: 0.75
mem_guarantee: "512M"
mem_limit: "640M"
#########################
# Hub Config #
#########################
hub:
allowNamedServers: true
extraConfig: |
c.JupyterHub.admin_access = True
c.JupyterHub.api_tokens = {
"a_very_secret_token": "pmende"
}
```
| [
{
"content": "\"\"\"User handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport asyncio\nimport json\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import timezone\n\nfrom async_generator import aclosing\nfrom dateutil.parser import parse as parse_date\nfrom tornado import web\nfrom tornado.iostream import StreamClosedError\n\nfrom .. import orm\nfrom ..user import User\nfrom ..utils import admin_only\nfrom ..utils import isoformat\nfrom ..utils import iterate_until\nfrom ..utils import maybe_future\nfrom ..utils import url_path_join\nfrom .base import APIHandler\n\n\nclass SelfAPIHandler(APIHandler):\n \"\"\"Return the authenticated user's model\n\n Based on the authentication info. Acts as a 'whoami' for auth tokens.\n \"\"\"\n\n async def get(self):\n user = self.current_user\n if user is None:\n # whoami can be accessed via oauth token\n user = self.get_current_user_oauth_token()\n if user is None:\n raise web.HTTPError(403)\n self.write(json.dumps(self.user_model(user)))\n\n\nclass UserListAPIHandler(APIHandler):\n @admin_only\n def get(self):\n data = [\n self.user_model(u, include_servers=True, include_state=True)\n for u in self.db.query(orm.User)\n ]\n self.write(json.dumps(data))\n\n @admin_only\n async def post(self):\n data = self.get_json_body()\n if not data or not isinstance(data, dict) or not data.get('usernames'):\n raise web.HTTPError(400, \"Must specify at least one user to create\")\n\n usernames = data.pop('usernames')\n self._check_user_model(data)\n # admin is set for all users\n # to create admin and non-admin users requires at least two API requests\n admin = data.get('admin', False)\n\n to_create = []\n invalid_names = []\n for name in usernames:\n name = self.authenticator.normalize_username(name)\n if not self.authenticator.validate_username(name):\n invalid_names.append(name)\n continue\n user = self.find_user(name)\n if user is not None:\n self.log.warning(\"User %s already exists\" % name)\n else:\n to_create.append(name)\n\n if invalid_names:\n if len(invalid_names) == 1:\n msg = \"Invalid username: %s\" % invalid_names[0]\n else:\n msg = \"Invalid usernames: %s\" % ', '.join(invalid_names)\n raise web.HTTPError(400, msg)\n\n if not to_create:\n raise web.HTTPError(409, \"All %i users already exist\" % len(usernames))\n\n created = []\n for name in to_create:\n user = self.user_from_username(name)\n if admin:\n user.admin = True\n self.db.commit()\n try:\n await maybe_future(self.authenticator.add_user(user))\n except Exception as e:\n self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n self.users.delete(user)\n raise web.HTTPError(\n 400, \"Failed to create user %s: %s\" % (name, str(e))\n )\n else:\n created.append(user)\n\n self.write(json.dumps([self.user_model(u) for u in created]))\n self.set_status(201)\n\n\ndef admin_or_self(method):\n \"\"\"Decorator for restricting access to either the target user or admin\"\"\"\n\n def m(self, name, *args, **kwargs):\n current = self.current_user\n if current is None:\n raise web.HTTPError(403)\n if not (current.name == name or current.admin):\n raise web.HTTPError(403)\n\n # raise 404 if not found\n if not self.find_user(name):\n raise web.HTTPError(404)\n return method(self, name, *args, **kwargs)\n\n return m\n\n\nclass UserAPIHandler(APIHandler):\n @admin_or_self\n async def get(self, name):\n user = self.find_user(name)\n model = self.user_model(\n user, include_servers=True, 
include_state=self.current_user.admin\n )\n # auth state will only be shown if the requester is an admin\n # this means users can't see their own auth state unless they\n # are admins, Hub admins often are also marked as admins so they\n # will see their auth state but normal users won't\n requester = self.current_user\n if requester.admin:\n model['auth_state'] = await user.get_auth_state()\n self.write(json.dumps(model))\n\n @admin_only\n async def post(self, name):\n data = self.get_json_body()\n user = self.find_user(name)\n if user is not None:\n raise web.HTTPError(409, \"User %s already exists\" % name)\n\n user = self.user_from_username(name)\n if data:\n self._check_user_model(data)\n if 'admin' in data:\n user.admin = data['admin']\n self.db.commit()\n\n try:\n await maybe_future(self.authenticator.add_user(user))\n except Exception:\n self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n # remove from registry\n self.users.delete(user)\n raise web.HTTPError(400, \"Failed to create user: %s\" % name)\n\n self.write(json.dumps(self.user_model(user)))\n self.set_status(201)\n\n @admin_only\n async def delete(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n if user.name == self.current_user.name:\n raise web.HTTPError(400, \"Cannot delete yourself!\")\n if user.spawner._stop_pending:\n raise web.HTTPError(\n 400, \"%s's server is in the process of stopping, please wait.\" % name\n )\n if user.running:\n await self.stop_single_user(user)\n if user.spawner._stop_pending:\n raise web.HTTPError(\n 400,\n \"%s's server is in the process of stopping, please wait.\" % name,\n )\n\n await maybe_future(self.authenticator.delete_user(user))\n # remove from registry\n self.users.delete(user)\n\n self.set_status(204)\n\n @admin_only\n async def patch(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n data = self.get_json_body()\n self._check_user_model(data)\n if 'name' in data and data['name'] != name:\n # check if the new name is already taken inside db\n if self.find_user(data['name']):\n raise web.HTTPError(\n 400,\n \"User %s already exists, username must be unique\" % data['name'],\n )\n for key, value in data.items():\n if key == 'auth_state':\n await user.save_auth_state(value)\n else:\n setattr(user, key, value)\n self.db.commit()\n user_ = self.user_model(user)\n user_['auth_state'] = await user.get_auth_state()\n self.write(json.dumps(user_))\n\n\nclass UserTokenListAPIHandler(APIHandler):\n \"\"\"API endpoint for listing/creating tokens\"\"\"\n\n @admin_or_self\n def get(self, name):\n \"\"\"Get tokens for a given user\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n\n now = datetime.utcnow()\n\n api_tokens = []\n\n def sort_key(token):\n return token.last_activity or token.created\n\n for token in sorted(user.api_tokens, key=sort_key):\n if token.expires_at and token.expires_at < now:\n # exclude expired tokens\n self.db.delete(token)\n self.db.commit()\n continue\n api_tokens.append(self.token_model(token))\n\n oauth_tokens = []\n # OAuth tokens use integer timestamps\n now_timestamp = now.timestamp()\n for token in sorted(user.oauth_tokens, key=sort_key):\n if token.expires_at and token.expires_at < now_timestamp:\n # exclude expired tokens\n self.db.delete(token)\n self.db.commit()\n continue\n oauth_tokens.append(self.token_model(token))\n self.write(json.dumps({'api_tokens': api_tokens, 'oauth_tokens': oauth_tokens}))\n\n 
async def post(self, name):\n body = self.get_json_body() or {}\n if not isinstance(body, dict):\n raise web.HTTPError(400, \"Body must be a JSON dict or empty\")\n\n requester = self.current_user\n if requester is None:\n # defer to Authenticator for identifying the user\n # can be username+password or an upstream auth token\n try:\n name = await self.authenticate(body.get('auth'))\n if isinstance(name, dict):\n # not a simple string so it has to be a dict\n name = name.get('name')\n except web.HTTPError as e:\n # turn any authentication error into 403\n raise web.HTTPError(403)\n except Exception as e:\n # suppress and log error here in case Authenticator\n # isn't prepared to handle auth via this data\n self.log.error(\n \"Error authenticating request for %s: %s\", self.request.uri, e\n )\n raise web.HTTPError(403)\n requester = self.find_user(name)\n if requester is None:\n # couldn't identify requester\n raise web.HTTPError(403)\n user = self.find_user(name)\n if requester is not user and not requester.admin:\n raise web.HTTPError(403, \"Only admins can request tokens for other users\")\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n if requester is not user:\n kind = 'user' if isinstance(requester, User) else 'service'\n\n note = body.get('note')\n if not note:\n note = \"Requested via api\"\n if requester is not user:\n note += \" by %s %s\" % (kind, requester.name)\n\n api_token = user.new_api_token(\n note=note, expires_in=body.get('expires_in', None)\n )\n if requester is not user:\n self.log.info(\n \"%s %s requested API token for %s\",\n kind.title(),\n requester.name,\n user.name,\n )\n else:\n user_kind = 'user' if isinstance(user, User) else 'service'\n self.log.info(\"%s %s requested new API token\", user_kind.title(), user.name)\n # retrieve the model\n token_model = self.token_model(orm.APIToken.find(self.db, api_token))\n token_model['token'] = api_token\n self.write(json.dumps(token_model))\n\n\nclass UserTokenAPIHandler(APIHandler):\n \"\"\"API endpoint for retrieving/deleting individual tokens\"\"\"\n\n def find_token_by_id(self, user, token_id):\n \"\"\"Find a token object by token-id key\n\n Raises 404 if not found for any reason\n (e.g. 
wrong owner, invalid key format, etc.)\n \"\"\"\n not_found = \"No such token %s for user %s\" % (token_id, user.name)\n prefix, id = token_id[0], token_id[1:]\n if prefix == 'a':\n Token = orm.APIToken\n elif prefix == 'o':\n Token = orm.OAuthAccessToken\n else:\n raise web.HTTPError(404, not_found)\n try:\n id = int(id)\n except ValueError:\n raise web.HTTPError(404, not_found)\n\n orm_token = self.db.query(Token).filter(Token.id == id).first()\n if orm_token is None or orm_token.user is not user.orm_user:\n raise web.HTTPError(404, \"Token not found %s\", orm_token)\n return orm_token\n\n @admin_or_self\n def get(self, name, token_id):\n \"\"\"\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n token = self.find_token_by_id(user, token_id)\n self.write(json.dumps(self.token_model(token)))\n\n @admin_or_self\n def delete(self, name, token_id):\n \"\"\"Delete a token\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n token = self.find_token_by_id(user, token_id)\n # deleting an oauth token deletes *all* oauth tokens for that client\n if isinstance(token, orm.OAuthAccessToken):\n client_id = token.client_id\n tokens = [\n token for token in user.oauth_tokens if token.client_id == client_id\n ]\n else:\n tokens = [token]\n for token in tokens:\n self.db.delete(token)\n self.db.commit()\n self.set_header('Content-Type', 'text/plain')\n self.set_status(204)\n\n\nclass UserServerAPIHandler(APIHandler):\n \"\"\"Start and stop single-user servers\"\"\"\n\n @admin_or_self\n async def post(self, name, server_name=''):\n user = self.find_user(name)\n if server_name:\n if not self.allow_named_servers:\n raise web.HTTPError(400, \"Named servers are not enabled.\")\n if (\n self.named_server_limit_per_user > 0\n and server_name not in user.orm_spawners\n ):\n named_spawners = list(user.all_spawners(include_default=False))\n if self.named_server_limit_per_user <= len(named_spawners):\n raise web.HTTPError(\n 400,\n \"User {} already has the maximum of {} named servers.\"\n \" One must be deleted before a new server can be created\".format(\n name, self.named_server_limit_per_user\n ),\n )\n spawner = user.spawners[server_name]\n pending = spawner.pending\n if pending == 'spawn':\n self.set_header('Content-Type', 'text/plain')\n self.set_status(202)\n return\n elif pending:\n raise web.HTTPError(400, \"%s is pending %s\" % (spawner._log_name, pending))\n\n if spawner.ready:\n # include notify, so that a server that died is noticed immediately\n # set _spawn_pending flag to prevent races while we wait\n spawner._spawn_pending = True\n try:\n state = await spawner.poll_and_notify()\n finally:\n spawner._spawn_pending = False\n if state is None:\n raise web.HTTPError(400, \"%s is already running\" % spawner._log_name)\n\n options = self.get_json_body()\n await self.spawn_single_user(user, server_name, options=options)\n status = 202 if spawner.pending == 'spawn' else 201\n self.set_header('Content-Type', 'text/plain')\n self.set_status(status)\n\n @admin_or_self\n async def delete(self, name, server_name=''):\n user = self.find_user(name)\n options = self.get_json_body()\n remove = (options or {}).get('remove', False)\n\n def _remove_spawner(f=None):\n if f and f.exception():\n return\n self.log.info(\"Deleting spawner %s\", spawner._log_name)\n self.db.delete(spawner.orm_spawner)\n self.db.commit()\n\n if server_name:\n if not self.allow_named_servers:\n raise web.HTTPError(400, \"Named 
servers are not enabled.\")\n if server_name not in user.orm_spawners:\n raise web.HTTPError(\n 404, \"%s has no server named '%s'\" % (name, server_name)\n )\n elif remove:\n raise web.HTTPError(400, \"Cannot delete the default server\")\n\n spawner = user.spawners[server_name]\n if spawner.pending == 'stop':\n self.log.debug(\"%s already stopping\", spawner._log_name)\n self.set_header('Content-Type', 'text/plain')\n self.set_status(202)\n if remove:\n spawner._stop_future.add_done_callback(_remove_spawner)\n return\n\n if spawner.pending:\n raise web.HTTPError(\n 400,\n \"%s is pending %s, please wait\" % (spawner._log_name, spawner.pending),\n )\n\n stop_future = None\n if spawner.ready:\n # include notify, so that a server that died is noticed immediately\n status = await spawner.poll_and_notify()\n if status is None:\n stop_future = await self.stop_single_user(user, server_name)\n\n if remove:\n if stop_future:\n stop_future.add_done_callback(_remove_spawner)\n else:\n _remove_spawner()\n\n status = 202 if spawner._stop_pending else 204\n self.set_header('Content-Type', 'text/plain')\n self.set_status(status)\n\n\nclass UserAdminAccessAPIHandler(APIHandler):\n \"\"\"Grant admins access to single-user servers\n\n This handler sets the necessary cookie for an admin to login to a single-user server.\n \"\"\"\n\n @admin_only\n def post(self, name):\n self.log.warning(\n \"Deprecated in JupyterHub 0.8.\"\n \" Admin access API is not needed now that we use OAuth.\"\n )\n current = self.current_user\n self.log.warning(\n \"Admin user %s has requested access to %s's server\", current.name, name\n )\n if not self.settings.get('admin_access', False):\n raise web.HTTPError(403, \"admin access to user servers disabled\")\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n\n\nclass SpawnProgressAPIHandler(APIHandler):\n \"\"\"EventStream handler for pending spawns\"\"\"\n\n keepalive_interval = 8\n\n def get_content_type(self):\n return 'text/event-stream'\n\n async def send_event(self, event):\n try:\n self.write('data: {}\\n\\n'.format(json.dumps(event)))\n await self.flush()\n except StreamClosedError:\n self.log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise web.Finish()\n\n def initialize(self):\n super().initialize()\n self._finish_future = asyncio.Future()\n\n def on_finish(self):\n self._finish_future.set_result(None)\n\n async def keepalive(self):\n \"\"\"Write empty lines periodically\n\n to avoid being closed by intermediate proxies\n when there's a large gap between events.\n \"\"\"\n while not self._finish_future.done():\n try:\n self.write(\"\\n\\n\")\n await self.flush()\n except (StreamClosedError, RuntimeError):\n return\n\n await asyncio.wait([self._finish_future], timeout=self.keepalive_interval)\n\n @admin_or_self\n async def get(self, username, server_name=''):\n self.set_header('Cache-Control', 'no-cache')\n if server_name is None:\n server_name = ''\n user = self.find_user(username)\n if user is None:\n # no such user\n raise web.HTTPError(404)\n if server_name not in user.spawners:\n # user has no such server\n raise web.HTTPError(404)\n spawner = user.spawners[server_name]\n\n # start sending keepalive to avoid proxies closing the connection\n asyncio.ensure_future(self.keepalive())\n # cases:\n # - spawner already started and ready\n # - spawner not running at all\n # - spawner failed\n # - spawner pending start (what we expect)\n url = url_path_join(user.url, server_name, '/')\n 
ready_event = {\n 'progress': 100,\n 'ready': True,\n 'message': \"Server ready at {}\".format(url),\n 'html_message': 'Server ready at <a href=\"{0}\">{0}</a>'.format(url),\n 'url': url,\n }\n failed_event = {'progress': 100, 'failed': True, 'message': \"Spawn failed\"}\n\n if spawner.ready:\n # spawner already ready. Trigger progress-completion immediately\n self.log.info(\"Server %s is already started\", spawner._log_name)\n await self.send_event(ready_event)\n return\n\n spawn_future = spawner._spawn_future\n\n if not spawner._spawn_pending:\n # not pending, no progress to fetch\n # check if spawner has just failed\n f = spawn_future\n if f and f.done() and f.exception():\n failed_event['message'] = \"Spawn failed: %s\" % f.exception()\n await self.send_event(failed_event)\n return\n else:\n raise web.HTTPError(400, \"%s is not starting...\", spawner._log_name)\n\n # retrieve progress events from the Spawner\n async with aclosing(\n iterate_until(spawn_future, spawner._generate_progress())\n ) as events:\n async for event in events:\n # don't allow events to sneakily set the 'ready' flag\n if 'ready' in event:\n event.pop('ready', None)\n await self.send_event(event)\n\n # progress finished, wait for spawn to actually resolve,\n # in case progress finished early\n # (ignore errors, which will be logged elsewhere)\n await asyncio.wait([spawn_future])\n\n # progress and spawn finished, check if spawn succeeded\n if spawner.ready:\n # spawner is ready, signal completion and redirect\n self.log.info(\"Server %s is ready\", spawner._log_name)\n await self.send_event(ready_event)\n else:\n # what happened? Maybe spawn failed?\n f = spawn_future\n if f and f.done() and f.exception():\n failed_event['message'] = \"Spawn failed: %s\" % f.exception()\n else:\n self.log.warning(\n \"Server %s didn't start for unknown reason\", spawner._log_name\n )\n await self.send_event(failed_event)\n\n\ndef _parse_timestamp(timestamp):\n \"\"\"Parse and return a utc timestamp\n\n - raise HTTPError(400) on parse error\n - handle and strip tz info for internal consistency\n (we use naïve utc timestamps everywhere)\n \"\"\"\n try:\n dt = parse_date(timestamp)\n except Exception:\n raise web.HTTPError(400, \"Not a valid timestamp: %r\", timestamp)\n if dt.tzinfo:\n # strip timezone info to naïve UTC datetime\n dt = dt.astimezone(timezone.utc).replace(tzinfo=None)\n\n now = datetime.utcnow()\n if (dt - now) > timedelta(minutes=59):\n raise web.HTTPError(\n 400,\n \"Rejecting activity from more than an hour in the future: {}\".format(\n isoformat(dt)\n ),\n )\n return dt\n\n\nclass ActivityAPIHandler(APIHandler):\n def _validate_servers(self, user, servers):\n \"\"\"Validate servers dict argument\n\n - types are correct\n - each server exists\n - last_activity fields are parsed into datetime objects\n \"\"\"\n msg = \"servers must be a dict of the form {server_name: {last_activity: timestamp}}\"\n if not isinstance(servers, dict):\n raise web.HTTPError(400, msg)\n\n spawners = user.orm_spawners\n for server_name, server_info in servers.items():\n if server_name not in spawners:\n raise web.HTTPError(\n 400,\n \"No such server '{}' for user {}\".format(server_name, user.name),\n )\n # check that each per-server field is a dict\n if not isinstance(server_info, dict):\n raise web.HTTPError(400, msg)\n # check that last_activity is defined for each per-server dict\n if 'last_activity' not in server_info:\n raise web.HTTPError(400, msg)\n # parse last_activity timestamps\n # _parse_timestamp above is responsible for 
raising errors\n server_info['last_activity'] = _parse_timestamp(\n server_info['last_activity']\n )\n return servers\n\n @admin_or_self\n def post(self, username):\n user = self.find_user(username)\n if user is None:\n # no such user\n raise web.HTTPError(404, \"No such user: %r\", username)\n\n body = self.get_json_body()\n if not isinstance(body, dict):\n raise web.HTTPError(400, \"body must be a json dict\")\n\n last_activity_timestamp = body.get('last_activity')\n servers = body.get('servers')\n if not last_activity_timestamp and not servers:\n raise web.HTTPError(\n 400, \"body must contain at least one of `last_activity` or `servers`\"\n )\n\n if servers:\n # validate server args\n servers = self._validate_servers(user, servers)\n # at this point we know that the servers dict\n # is valid and contains only servers that exist\n # and last_activity is defined and a valid datetime object\n\n # update user.last_activity if specified\n if last_activity_timestamp:\n last_activity = _parse_timestamp(last_activity_timestamp)\n if (not user.last_activity) or last_activity > user.last_activity:\n self.log.debug(\n \"Activity for user %s: %s\", user.name, isoformat(last_activity)\n )\n user.last_activity = last_activity\n else:\n self.log.debug(\n \"Not updating activity for %s: %s < %s\",\n user,\n isoformat(last_activity),\n isoformat(user.last_activity),\n )\n\n if servers:\n for server_name, server_info in servers.items():\n last_activity = server_info['last_activity']\n spawner = user.orm_spawners[server_name]\n\n if (not spawner.last_activity) or last_activity > spawner.last_activity:\n self.log.debug(\n \"Activity on server %s/%s: %s\",\n user.name,\n server_name,\n isoformat(last_activity),\n )\n spawner.last_activity = last_activity\n else:\n self.log.debug(\n \"Not updating server activity on %s/%s: %s < %s\",\n user.name,\n server_name,\n isoformat(last_activity),\n isoformat(user.last_activity),\n )\n\n self.db.commit()\n\n\ndefault_handlers = [\n (r\"/api/user\", SelfAPIHandler),\n (r\"/api/users\", UserListAPIHandler),\n (r\"/api/users/([^/]+)\", UserAPIHandler),\n (r\"/api/users/([^/]+)/server\", UserServerAPIHandler),\n (r\"/api/users/([^/]+)/server/progress\", SpawnProgressAPIHandler),\n (r\"/api/users/([^/]+)/tokens\", UserTokenListAPIHandler),\n (r\"/api/users/([^/]+)/tokens/([^/]*)\", UserTokenAPIHandler),\n (r\"/api/users/([^/]+)/servers/([^/]*)\", UserServerAPIHandler),\n (r\"/api/users/([^/]+)/servers/([^/]*)/progress\", SpawnProgressAPIHandler),\n (r\"/api/users/([^/]+)/activity\", ActivityAPIHandler),\n (r\"/api/users/([^/]+)/admin-access\", UserAdminAccessAPIHandler),\n]\n",
"path": "jupyterhub/apihandlers/users.py"
}
] | [
{
"content": "\"\"\"User handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport asyncio\nimport json\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import timezone\n\nfrom async_generator import aclosing\nfrom dateutil.parser import parse as parse_date\nfrom tornado import web\nfrom tornado.iostream import StreamClosedError\n\nfrom .. import orm\nfrom ..user import User\nfrom ..utils import admin_only\nfrom ..utils import isoformat\nfrom ..utils import iterate_until\nfrom ..utils import maybe_future\nfrom ..utils import url_path_join\nfrom .base import APIHandler\n\n\nclass SelfAPIHandler(APIHandler):\n \"\"\"Return the authenticated user's model\n\n Based on the authentication info. Acts as a 'whoami' for auth tokens.\n \"\"\"\n\n async def get(self):\n user = self.current_user\n if user is None:\n # whoami can be accessed via oauth token\n user = self.get_current_user_oauth_token()\n if user is None:\n raise web.HTTPError(403)\n self.write(json.dumps(self.user_model(user)))\n\n\nclass UserListAPIHandler(APIHandler):\n @admin_only\n def get(self):\n data = [\n self.user_model(u, include_servers=True, include_state=True)\n for u in self.db.query(orm.User)\n ]\n self.write(json.dumps(data))\n\n @admin_only\n async def post(self):\n data = self.get_json_body()\n if not data or not isinstance(data, dict) or not data.get('usernames'):\n raise web.HTTPError(400, \"Must specify at least one user to create\")\n\n usernames = data.pop('usernames')\n self._check_user_model(data)\n # admin is set for all users\n # to create admin and non-admin users requires at least two API requests\n admin = data.get('admin', False)\n\n to_create = []\n invalid_names = []\n for name in usernames:\n name = self.authenticator.normalize_username(name)\n if not self.authenticator.validate_username(name):\n invalid_names.append(name)\n continue\n user = self.find_user(name)\n if user is not None:\n self.log.warning(\"User %s already exists\" % name)\n else:\n to_create.append(name)\n\n if invalid_names:\n if len(invalid_names) == 1:\n msg = \"Invalid username: %s\" % invalid_names[0]\n else:\n msg = \"Invalid usernames: %s\" % ', '.join(invalid_names)\n raise web.HTTPError(400, msg)\n\n if not to_create:\n raise web.HTTPError(409, \"All %i users already exist\" % len(usernames))\n\n created = []\n for name in to_create:\n user = self.user_from_username(name)\n if admin:\n user.admin = True\n self.db.commit()\n try:\n await maybe_future(self.authenticator.add_user(user))\n except Exception as e:\n self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n self.users.delete(user)\n raise web.HTTPError(\n 400, \"Failed to create user %s: %s\" % (name, str(e))\n )\n else:\n created.append(user)\n\n self.write(json.dumps([self.user_model(u) for u in created]))\n self.set_status(201)\n\n\ndef admin_or_self(method):\n \"\"\"Decorator for restricting access to either the target user or admin\"\"\"\n\n def m(self, name, *args, **kwargs):\n current = self.current_user\n if current is None:\n raise web.HTTPError(403)\n if not (current.name == name or current.admin):\n raise web.HTTPError(403)\n\n # raise 404 if not found\n if not self.find_user(name):\n raise web.HTTPError(404)\n return method(self, name, *args, **kwargs)\n\n return m\n\n\nclass UserAPIHandler(APIHandler):\n @admin_or_self\n async def get(self, name):\n user = self.find_user(name)\n model = self.user_model(\n user, include_servers=True, 
include_state=self.current_user.admin\n )\n # auth state will only be shown if the requester is an admin\n # this means users can't see their own auth state unless they\n # are admins, Hub admins often are also marked as admins so they\n # will see their auth state but normal users won't\n requester = self.current_user\n if requester.admin:\n model['auth_state'] = await user.get_auth_state()\n self.write(json.dumps(model))\n\n @admin_only\n async def post(self, name):\n data = self.get_json_body()\n user = self.find_user(name)\n if user is not None:\n raise web.HTTPError(409, \"User %s already exists\" % name)\n\n user = self.user_from_username(name)\n if data:\n self._check_user_model(data)\n if 'admin' in data:\n user.admin = data['admin']\n self.db.commit()\n\n try:\n await maybe_future(self.authenticator.add_user(user))\n except Exception:\n self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n # remove from registry\n self.users.delete(user)\n raise web.HTTPError(400, \"Failed to create user: %s\" % name)\n\n self.write(json.dumps(self.user_model(user)))\n self.set_status(201)\n\n @admin_only\n async def delete(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n if user.name == self.current_user.name:\n raise web.HTTPError(400, \"Cannot delete yourself!\")\n if user.spawner._stop_pending:\n raise web.HTTPError(\n 400, \"%s's server is in the process of stopping, please wait.\" % name\n )\n if user.running:\n await self.stop_single_user(user)\n if user.spawner._stop_pending:\n raise web.HTTPError(\n 400,\n \"%s's server is in the process of stopping, please wait.\" % name,\n )\n\n await maybe_future(self.authenticator.delete_user(user))\n # remove from registry\n self.users.delete(user)\n\n self.set_status(204)\n\n @admin_only\n async def patch(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n data = self.get_json_body()\n self._check_user_model(data)\n if 'name' in data and data['name'] != name:\n # check if the new name is already taken inside db\n if self.find_user(data['name']):\n raise web.HTTPError(\n 400,\n \"User %s already exists, username must be unique\" % data['name'],\n )\n for key, value in data.items():\n if key == 'auth_state':\n await user.save_auth_state(value)\n else:\n setattr(user, key, value)\n self.db.commit()\n user_ = self.user_model(user)\n user_['auth_state'] = await user.get_auth_state()\n self.write(json.dumps(user_))\n\n\nclass UserTokenListAPIHandler(APIHandler):\n \"\"\"API endpoint for listing/creating tokens\"\"\"\n\n @admin_or_self\n def get(self, name):\n \"\"\"Get tokens for a given user\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n\n now = datetime.utcnow()\n\n api_tokens = []\n\n def sort_key(token):\n return token.last_activity or token.created\n\n for token in sorted(user.api_tokens, key=sort_key):\n if token.expires_at and token.expires_at < now:\n # exclude expired tokens\n self.db.delete(token)\n self.db.commit()\n continue\n api_tokens.append(self.token_model(token))\n\n oauth_tokens = []\n # OAuth tokens use integer timestamps\n now_timestamp = now.timestamp()\n for token in sorted(user.oauth_tokens, key=sort_key):\n if token.expires_at and token.expires_at < now_timestamp:\n # exclude expired tokens\n self.db.delete(token)\n self.db.commit()\n continue\n oauth_tokens.append(self.token_model(token))\n self.write(json.dumps({'api_tokens': api_tokens, 'oauth_tokens': oauth_tokens}))\n\n 
async def post(self, name):\n body = self.get_json_body() or {}\n if not isinstance(body, dict):\n raise web.HTTPError(400, \"Body must be a JSON dict or empty\")\n\n requester = self.current_user\n if requester is None:\n # defer to Authenticator for identifying the user\n # can be username+password or an upstream auth token\n try:\n name = await self.authenticate(body.get('auth'))\n if isinstance(name, dict):\n # not a simple string so it has to be a dict\n name = name.get('name')\n except web.HTTPError as e:\n # turn any authentication error into 403\n raise web.HTTPError(403)\n except Exception as e:\n # suppress and log error here in case Authenticator\n # isn't prepared to handle auth via this data\n self.log.error(\n \"Error authenticating request for %s: %s\", self.request.uri, e\n )\n raise web.HTTPError(403)\n requester = self.find_user(name)\n if requester is None:\n # couldn't identify requester\n raise web.HTTPError(403)\n user = self.find_user(name)\n if requester is not user and not requester.admin:\n raise web.HTTPError(403, \"Only admins can request tokens for other users\")\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n if requester is not user:\n kind = 'user' if isinstance(requester, User) else 'service'\n\n note = body.get('note')\n if not note:\n note = \"Requested via api\"\n if requester is not user:\n note += \" by %s %s\" % (kind, requester.name)\n\n api_token = user.new_api_token(\n note=note, expires_in=body.get('expires_in', None)\n )\n if requester is not user:\n self.log.info(\n \"%s %s requested API token for %s\",\n kind.title(),\n requester.name,\n user.name,\n )\n else:\n user_kind = 'user' if isinstance(user, User) else 'service'\n self.log.info(\"%s %s requested new API token\", user_kind.title(), user.name)\n # retrieve the model\n token_model = self.token_model(orm.APIToken.find(self.db, api_token))\n token_model['token'] = api_token\n self.write(json.dumps(token_model))\n\n\nclass UserTokenAPIHandler(APIHandler):\n \"\"\"API endpoint for retrieving/deleting individual tokens\"\"\"\n\n def find_token_by_id(self, user, token_id):\n \"\"\"Find a token object by token-id key\n\n Raises 404 if not found for any reason\n (e.g. 
wrong owner, invalid key format, etc.)\n \"\"\"\n not_found = \"No such token %s for user %s\" % (token_id, user.name)\n prefix, id = token_id[0], token_id[1:]\n if prefix == 'a':\n Token = orm.APIToken\n elif prefix == 'o':\n Token = orm.OAuthAccessToken\n else:\n raise web.HTTPError(404, not_found)\n try:\n id = int(id)\n except ValueError:\n raise web.HTTPError(404, not_found)\n\n orm_token = self.db.query(Token).filter(Token.id == id).first()\n if orm_token is None or orm_token.user is not user.orm_user:\n raise web.HTTPError(404, \"Token not found %s\", orm_token)\n return orm_token\n\n @admin_or_self\n def get(self, name, token_id):\n \"\"\"\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n token = self.find_token_by_id(user, token_id)\n self.write(json.dumps(self.token_model(token)))\n\n @admin_or_self\n def delete(self, name, token_id):\n \"\"\"Delete a token\"\"\"\n user = self.find_user(name)\n if not user:\n raise web.HTTPError(404, \"No such user: %s\" % name)\n token = self.find_token_by_id(user, token_id)\n # deleting an oauth token deletes *all* oauth tokens for that client\n if isinstance(token, orm.OAuthAccessToken):\n client_id = token.client_id\n tokens = [\n token for token in user.oauth_tokens if token.client_id == client_id\n ]\n else:\n tokens = [token]\n for token in tokens:\n self.db.delete(token)\n self.db.commit()\n self.set_header('Content-Type', 'text/plain')\n self.set_status(204)\n\n\nclass UserServerAPIHandler(APIHandler):\n \"\"\"Start and stop single-user servers\"\"\"\n\n @admin_or_self\n async def post(self, name, server_name=''):\n user = self.find_user(name)\n if server_name:\n if not self.allow_named_servers:\n raise web.HTTPError(400, \"Named servers are not enabled.\")\n if (\n self.named_server_limit_per_user > 0\n and server_name not in user.orm_spawners\n ):\n named_spawners = list(user.all_spawners(include_default=False))\n if self.named_server_limit_per_user <= len(named_spawners):\n raise web.HTTPError(\n 400,\n \"User {} already has the maximum of {} named servers.\"\n \" One must be deleted before a new server can be created\".format(\n name, self.named_server_limit_per_user\n ),\n )\n spawner = user.spawners[server_name]\n pending = spawner.pending\n if pending == 'spawn':\n self.set_header('Content-Type', 'text/plain')\n self.set_status(202)\n return\n elif pending:\n raise web.HTTPError(400, \"%s is pending %s\" % (spawner._log_name, pending))\n\n if spawner.ready:\n # include notify, so that a server that died is noticed immediately\n # set _spawn_pending flag to prevent races while we wait\n spawner._spawn_pending = True\n try:\n state = await spawner.poll_and_notify()\n finally:\n spawner._spawn_pending = False\n if state is None:\n raise web.HTTPError(400, \"%s is already running\" % spawner._log_name)\n\n options = self.get_json_body()\n await self.spawn_single_user(user, server_name, options=options)\n status = 202 if spawner.pending == 'spawn' else 201\n self.set_header('Content-Type', 'text/plain')\n self.set_status(status)\n\n @admin_or_self\n async def delete(self, name, server_name=''):\n user = self.find_user(name)\n options = self.get_json_body()\n remove = (options or {}).get('remove', False)\n\n def _remove_spawner(f=None):\n if f and f.exception():\n return\n self.log.info(\"Deleting spawner %s\", spawner._log_name)\n self.db.delete(spawner.orm_spawner)\n user.spawners.pop(server_name, None)\n self.db.commit()\n\n if server_name:\n if not 
self.allow_named_servers:\n raise web.HTTPError(400, \"Named servers are not enabled.\")\n if server_name not in user.orm_spawners:\n raise web.HTTPError(\n 404, \"%s has no server named '%s'\" % (name, server_name)\n )\n elif remove:\n raise web.HTTPError(400, \"Cannot delete the default server\")\n\n spawner = user.spawners[server_name]\n if spawner.pending == 'stop':\n self.log.debug(\"%s already stopping\", spawner._log_name)\n self.set_header('Content-Type', 'text/plain')\n self.set_status(202)\n if remove:\n spawner._stop_future.add_done_callback(_remove_spawner)\n return\n\n if spawner.pending:\n raise web.HTTPError(\n 400,\n \"%s is pending %s, please wait\" % (spawner._log_name, spawner.pending),\n )\n\n stop_future = None\n if spawner.ready:\n # include notify, so that a server that died is noticed immediately\n status = await spawner.poll_and_notify()\n if status is None:\n stop_future = await self.stop_single_user(user, server_name)\n\n if remove:\n if stop_future:\n stop_future.add_done_callback(_remove_spawner)\n else:\n _remove_spawner()\n\n status = 202 if spawner._stop_pending else 204\n self.set_header('Content-Type', 'text/plain')\n self.set_status(status)\n\n\nclass UserAdminAccessAPIHandler(APIHandler):\n \"\"\"Grant admins access to single-user servers\n\n This handler sets the necessary cookie for an admin to login to a single-user server.\n \"\"\"\n\n @admin_only\n def post(self, name):\n self.log.warning(\n \"Deprecated in JupyterHub 0.8.\"\n \" Admin access API is not needed now that we use OAuth.\"\n )\n current = self.current_user\n self.log.warning(\n \"Admin user %s has requested access to %s's server\", current.name, name\n )\n if not self.settings.get('admin_access', False):\n raise web.HTTPError(403, \"admin access to user servers disabled\")\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n\n\nclass SpawnProgressAPIHandler(APIHandler):\n \"\"\"EventStream handler for pending spawns\"\"\"\n\n keepalive_interval = 8\n\n def get_content_type(self):\n return 'text/event-stream'\n\n async def send_event(self, event):\n try:\n self.write('data: {}\\n\\n'.format(json.dumps(event)))\n await self.flush()\n except StreamClosedError:\n self.log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise web.Finish()\n\n def initialize(self):\n super().initialize()\n self._finish_future = asyncio.Future()\n\n def on_finish(self):\n self._finish_future.set_result(None)\n\n async def keepalive(self):\n \"\"\"Write empty lines periodically\n\n to avoid being closed by intermediate proxies\n when there's a large gap between events.\n \"\"\"\n while not self._finish_future.done():\n try:\n self.write(\"\\n\\n\")\n await self.flush()\n except (StreamClosedError, RuntimeError):\n return\n\n await asyncio.wait([self._finish_future], timeout=self.keepalive_interval)\n\n @admin_or_self\n async def get(self, username, server_name=''):\n self.set_header('Cache-Control', 'no-cache')\n if server_name is None:\n server_name = ''\n user = self.find_user(username)\n if user is None:\n # no such user\n raise web.HTTPError(404)\n if server_name not in user.spawners:\n # user has no such server\n raise web.HTTPError(404)\n spawner = user.spawners[server_name]\n\n # start sending keepalive to avoid proxies closing the connection\n asyncio.ensure_future(self.keepalive())\n # cases:\n # - spawner already started and ready\n # - spawner not running at all\n # - spawner failed\n # - spawner pending start (what we 
expect)\n url = url_path_join(user.url, server_name, '/')\n ready_event = {\n 'progress': 100,\n 'ready': True,\n 'message': \"Server ready at {}\".format(url),\n 'html_message': 'Server ready at <a href=\"{0}\">{0}</a>'.format(url),\n 'url': url,\n }\n failed_event = {'progress': 100, 'failed': True, 'message': \"Spawn failed\"}\n\n if spawner.ready:\n # spawner already ready. Trigger progress-completion immediately\n self.log.info(\"Server %s is already started\", spawner._log_name)\n await self.send_event(ready_event)\n return\n\n spawn_future = spawner._spawn_future\n\n if not spawner._spawn_pending:\n # not pending, no progress to fetch\n # check if spawner has just failed\n f = spawn_future\n if f and f.done() and f.exception():\n failed_event['message'] = \"Spawn failed: %s\" % f.exception()\n await self.send_event(failed_event)\n return\n else:\n raise web.HTTPError(400, \"%s is not starting...\", spawner._log_name)\n\n # retrieve progress events from the Spawner\n async with aclosing(\n iterate_until(spawn_future, spawner._generate_progress())\n ) as events:\n async for event in events:\n # don't allow events to sneakily set the 'ready' flag\n if 'ready' in event:\n event.pop('ready', None)\n await self.send_event(event)\n\n # progress finished, wait for spawn to actually resolve,\n # in case progress finished early\n # (ignore errors, which will be logged elsewhere)\n await asyncio.wait([spawn_future])\n\n # progress and spawn finished, check if spawn succeeded\n if spawner.ready:\n # spawner is ready, signal completion and redirect\n self.log.info(\"Server %s is ready\", spawner._log_name)\n await self.send_event(ready_event)\n else:\n # what happened? Maybe spawn failed?\n f = spawn_future\n if f and f.done() and f.exception():\n failed_event['message'] = \"Spawn failed: %s\" % f.exception()\n else:\n self.log.warning(\n \"Server %s didn't start for unknown reason\", spawner._log_name\n )\n await self.send_event(failed_event)\n\n\ndef _parse_timestamp(timestamp):\n \"\"\"Parse and return a utc timestamp\n\n - raise HTTPError(400) on parse error\n - handle and strip tz info for internal consistency\n (we use naïve utc timestamps everywhere)\n \"\"\"\n try:\n dt = parse_date(timestamp)\n except Exception:\n raise web.HTTPError(400, \"Not a valid timestamp: %r\", timestamp)\n if dt.tzinfo:\n # strip timezone info to naïve UTC datetime\n dt = dt.astimezone(timezone.utc).replace(tzinfo=None)\n\n now = datetime.utcnow()\n if (dt - now) > timedelta(minutes=59):\n raise web.HTTPError(\n 400,\n \"Rejecting activity from more than an hour in the future: {}\".format(\n isoformat(dt)\n ),\n )\n return dt\n\n\nclass ActivityAPIHandler(APIHandler):\n def _validate_servers(self, user, servers):\n \"\"\"Validate servers dict argument\n\n - types are correct\n - each server exists\n - last_activity fields are parsed into datetime objects\n \"\"\"\n msg = \"servers must be a dict of the form {server_name: {last_activity: timestamp}}\"\n if not isinstance(servers, dict):\n raise web.HTTPError(400, msg)\n\n spawners = user.orm_spawners\n for server_name, server_info in servers.items():\n if server_name not in spawners:\n raise web.HTTPError(\n 400,\n \"No such server '{}' for user {}\".format(server_name, user.name),\n )\n # check that each per-server field is a dict\n if not isinstance(server_info, dict):\n raise web.HTTPError(400, msg)\n # check that last_activity is defined for each per-server dict\n if 'last_activity' not in server_info:\n raise web.HTTPError(400, msg)\n # parse last_activity 
timestamps\n # _parse_timestamp above is responsible for raising errors\n server_info['last_activity'] = _parse_timestamp(\n server_info['last_activity']\n )\n return servers\n\n @admin_or_self\n def post(self, username):\n user = self.find_user(username)\n if user is None:\n # no such user\n raise web.HTTPError(404, \"No such user: %r\", username)\n\n body = self.get_json_body()\n if not isinstance(body, dict):\n raise web.HTTPError(400, \"body must be a json dict\")\n\n last_activity_timestamp = body.get('last_activity')\n servers = body.get('servers')\n if not last_activity_timestamp and not servers:\n raise web.HTTPError(\n 400, \"body must contain at least one of `last_activity` or `servers`\"\n )\n\n if servers:\n # validate server args\n servers = self._validate_servers(user, servers)\n # at this point we know that the servers dict\n # is valid and contains only servers that exist\n # and last_activity is defined and a valid datetime object\n\n # update user.last_activity if specified\n if last_activity_timestamp:\n last_activity = _parse_timestamp(last_activity_timestamp)\n if (not user.last_activity) or last_activity > user.last_activity:\n self.log.debug(\n \"Activity for user %s: %s\", user.name, isoformat(last_activity)\n )\n user.last_activity = last_activity\n else:\n self.log.debug(\n \"Not updating activity for %s: %s < %s\",\n user,\n isoformat(last_activity),\n isoformat(user.last_activity),\n )\n\n if servers:\n for server_name, server_info in servers.items():\n last_activity = server_info['last_activity']\n spawner = user.orm_spawners[server_name]\n\n if (not spawner.last_activity) or last_activity > spawner.last_activity:\n self.log.debug(\n \"Activity on server %s/%s: %s\",\n user.name,\n server_name,\n isoformat(last_activity),\n )\n spawner.last_activity = last_activity\n else:\n self.log.debug(\n \"Not updating server activity on %s/%s: %s < %s\",\n user.name,\n server_name,\n isoformat(last_activity),\n isoformat(user.last_activity),\n )\n\n self.db.commit()\n\n\ndefault_handlers = [\n (r\"/api/user\", SelfAPIHandler),\n (r\"/api/users\", UserListAPIHandler),\n (r\"/api/users/([^/]+)\", UserAPIHandler),\n (r\"/api/users/([^/]+)/server\", UserServerAPIHandler),\n (r\"/api/users/([^/]+)/server/progress\", SpawnProgressAPIHandler),\n (r\"/api/users/([^/]+)/tokens\", UserTokenListAPIHandler),\n (r\"/api/users/([^/]+)/tokens/([^/]*)\", UserTokenAPIHandler),\n (r\"/api/users/([^/]+)/servers/([^/]*)\", UserServerAPIHandler),\n (r\"/api/users/([^/]+)/servers/([^/]*)/progress\", SpawnProgressAPIHandler),\n (r\"/api/users/([^/]+)/activity\", ActivityAPIHandler),\n (r\"/api/users/([^/]+)/admin-access\", UserAdminAccessAPIHandler),\n]\n",
"path": "jupyterhub/apihandlers/users.py"
}
] | diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py
index 8250e2ce46..1c633723a6 100644
--- a/jupyterhub/apihandlers/users.py
+++ b/jupyterhub/apihandlers/users.py
@@ -427,6 +427,7 @@ def _remove_spawner(f=None):
return
self.log.info("Deleting spawner %s", spawner._log_name)
self.db.delete(spawner.orm_spawner)
+ user.spawners.pop(server_name, None)
self.db.commit()
if server_name:
diff --git a/jupyterhub/tests/test_named_servers.py b/jupyterhub/tests/test_named_servers.py
index ab7a600267..0f6809c10a 100644
--- a/jupyterhub/tests/test_named_servers.py
+++ b/jupyterhub/tests/test_named_servers.py
@@ -162,8 +162,10 @@ async def test_delete_named_server(app, named_servers):
)
r.raise_for_status()
assert r.status_code == 204
- # low-level record is now removes
+ # low-level record is now removed
assert servername not in user.orm_spawners
+ # and it's still not in the high-level wrapper dict
+ assert servername not in user.spawners
async def test_named_server_disabled(app):
|
iterative__dvc-5067 | dvc version: does not follow symlinks
# Bug Report
## Description
This is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and the cache type is `symlink`.
```
DVC version: 1.10.2 (pip)
---------------------------------
Platform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10
Supports: All remotes
Cache types: symlink
Cache directory: nfs4 on storage:/home
Caches: local
Remotes: s3
Workspace directory: nfs4 on storage:/home
Repo: dvc, git
```
However, I do have a `~/.config/dvc/config` file that overrides this:
```
[core]
experiments = true
[cache]
type = "reflink,symlink,copy"
protected = true
dir = /home/jc/ssd_cache/dvc_cache
[feature]
parametrization = true
```
And the actual cache dir is `/home/jc/ssd_cache/dvc_cache`, as I've specified, not the `nfs4 on storage:/home` reported above.
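
For context, here is a minimal sketch (not the actual DVC code; the mount table and paths are made up for illustration) of how a mountpoint lookup like the one behind `dvc version` reports the wrong filesystem when the configured cache dir is a symlink, and why resolving the path first fixes it:

```python
import itertools
import pathlib


def get_fs_type(path, partitions):
    # Walk the path and its parents until one matches a known mountpoint,
    # mirroring the lookup in dvc/info.py.
    path = pathlib.Path(path)  # note: no .resolve(), so symlinks are not followed
    for parent in itertools.chain([path], path.parents):
        if parent in partitions:
            return partitions[parent]
    return "unknown"


# Illustrative mount table: home is on NFS, the SSD is mounted elsewhere.
partitions = {
    pathlib.Path("/home"): "nfs4 on storage:/home",
    pathlib.Path("/mnt/ssd"): "ext4 on /dev/nvme0n1p1",
}

# If /home/jc/ssd_cache is a symlink to /mnt/ssd, the unresolved lookup still
# matches /home and reports the NFS mount; calling .resolve() on the path
# first would make it match the SSD mount instead.
print(get_fs_type("/home/jc/ssd_cache/dvc_cache", partitions))  # nfs4 on storage:/home
```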
| [
{
"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in 
partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n",
"path": "dvc/info.py"
}
] | [
{
"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n path = 
pathlib.Path(path).resolve()\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n",
"path": "dvc/info.py"
}
] | diff --git a/dvc/info.py b/dvc/info.py
index 4d8cbf8119..e7c05f05e8 100644
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -142,7 +142,8 @@ def get_fs_type(path):
for part in psutil.disk_partitions(all=True)
}
- path = pathlib.Path(path)
+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065
+ path = pathlib.Path(path).resolve()
for parent in itertools.chain([path], path.parents):
if parent in partition:
|
napari__napari-3498 | Napari bundle app crashes
## 🐛 Bug
Napari just shows a message stating "App has crashed" and starts opening application instances like crazy; it only stops once all of them, i.e. every Napari process, are killed.
## To Reproduce
Steps to reproduce the behavior:
1. Open napari
2. Click on Plugins -> Install/Uninstall Package(s)..
3. Install plugins and uninstall them until the app crashes
## Expected behavior
App should not crash
## Actual results
Napari starts acting like crazy, opening Napari instances endlessly. I need to uninstall it to use it again, because as soon as I open it the same thing starts all over again.
## Environment
MacOS Big Sur 11.6 - napari 0.4.11rc4
This is also reproducible in napari 0.4.11
<img width="1714" alt="Screen Shot 2021-09-24 at 11 50 41 AM" src="https://user-images.githubusercontent.com/63799148/134734701-e1e476b9-20a5-4135-a398-fa0986994ecb.png">
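
A plausible mechanism, consistent with the fix applied in the diff below: on macOS, Python 3.8 changed the default `multiprocessing` start method to "spawn", which launches worker processes by re-executing `sys.executable`; in a frozen bundle that executable is the napari app itself, so each spawned process opens yet another napari window. A minimal sketch of the guard (this mirrors the change made to `napari/__main__.py`):

```python
import multiprocessing
import sys

# On macOS with Python 3.8+ the default start method is "spawn", which
# re-executes sys.executable for every worker; in an app bundle that means
# relaunching the GUI. Forcing "fork" avoids the relaunch loop.
if sys.platform == "darwin" and sys.version_info >= (3, 8):
    multiprocessing.set_start_method("fork")
```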
| [
{
"content": "\"\"\"\nnapari command line viewer.\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport runpy\nimport sys\nimport warnings\nfrom ast import literal_eval\nfrom pathlib import Path\nfrom textwrap import wrap\nfrom typing import Any, Dict, List\n\n\nclass InfoAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --info\"\n from napari.utils import sys_info\n\n logging.basicConfig(level=logging.WARNING)\n print(sys_info())\n from .plugins import plugin_manager\n\n plugin_manager.discover_widgets()\n errors = plugin_manager.get_errors()\n if errors:\n names = {e.plugin_name for e in errors}\n print(\"\\n‼️ Errors were detected in the following plugins:\")\n print(\"(Run 'napari --plugin-info -v' for more details)\")\n print(\"\\n\".join(f\" - {n}\" for n in names))\n sys.exit()\n\n\nclass PluginInfoAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --info\"\n logging.basicConfig(level=logging.WARNING)\n from .plugins import plugin_manager\n\n plugin_manager.discover_widgets()\n print(plugin_manager)\n\n errors = plugin_manager.get_errors()\n if errors:\n print(\"‼️ Some errors occurred:\")\n verbose = '-v' in sys.argv or '--verbose' in sys.argv\n if not verbose:\n print(\" (use '-v') to show full tracebacks\")\n print(\"-\" * 38)\n\n for err in errors:\n print(err.plugin_name)\n print(f\" error: {err!r}\")\n print(f\" cause: {err.__cause__!r}\")\n if verbose:\n print(\" traceback:\")\n import traceback\n from textwrap import indent\n\n tb = traceback.format_tb(err.__cause__.__traceback__)\n print(indent(\"\".join(tb), ' '))\n sys.exit()\n\n\nclass CitationAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --citation\"\n from napari.utils import citation_text\n\n logging.basicConfig(level=logging.WARNING)\n print(citation_text)\n sys.exit()\n\n\ndef validate_unknown_args(unknown: List[str]) -> Dict[str, Any]:\n \"\"\"Convert a list of strings into a dict of valid kwargs for add_* methods.\n\n Will exit program if any of the arguments are unrecognized, or are\n malformed. 
Converts string to python type using literal_eval.\n\n Parameters\n ----------\n unknown : List[str]\n a list of strings gathered as \"unknown\" arguments in argparse.\n\n Returns\n -------\n kwargs : Dict[str, Any]\n {key: val} dict suitable for the viewer.add_* methods where ``val``\n is a ``literal_eval`` result, or string.\n \"\"\"\n\n from napari.components.viewer_model import valid_add_kwargs\n\n out: Dict[str, Any] = dict()\n valid = set.union(*valid_add_kwargs().values())\n for i, arg in enumerate(unknown):\n if not arg.startswith(\"--\"):\n continue\n\n if \"=\" in arg:\n key, value = arg.split(\"=\", maxsplit=1)\n else:\n key = arg\n key = key.lstrip('-').replace(\"-\", \"_\")\n\n if key not in valid:\n sys.exit(f\"error: unrecognized arguments: {arg}\")\n\n if \"=\" not in arg:\n try:\n value = unknown[i + 1]\n if value.startswith(\"--\"):\n raise IndexError()\n except IndexError:\n sys.exit(f\"error: argument {arg} expected one argument\")\n try:\n value = literal_eval(value)\n except Exception:\n value = value\n\n out[key] = value\n return out\n\n\ndef parse_sys_argv():\n \"\"\"Parse command line arguments.\"\"\"\n\n from napari import __version__, layers\n from napari.components.viewer_model import valid_add_kwargs\n\n kwarg_options = []\n for layer_type, keys in valid_add_kwargs().items():\n kwarg_options.append(f\" {layer_type.title()}:\")\n keys = {k.replace('_', '-') for k in keys}\n lines = wrap(\", \".join(sorted(keys)), break_on_hyphens=False)\n kwarg_options.extend([f\" {line}\" for line in lines])\n\n parser = argparse.ArgumentParser(\n usage=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\"optional layer-type-specific arguments (precede with '--'):\\n\"\n + \"\\n\".join(kwarg_options),\n )\n parser.add_argument('paths', nargs='*', help='path(s) to view.')\n parser.add_argument(\n '-v',\n '--verbose',\n action='count',\n default=0,\n help=\"increase output verbosity\",\n )\n parser.add_argument(\n '-w',\n '--with',\n dest='with_',\n nargs='+',\n metavar=('PLUGIN_NAME', 'WIDGET_NAME'),\n help=(\n \"open napari with dock widget from specified plugin name.\"\n \"(If plugin provides multiple dock widgets, widget name must also \"\n \"be provided)\"\n ),\n )\n parser.add_argument(\n '--version',\n action='version',\n version=f'napari version {__version__}',\n )\n parser.add_argument(\n '--info',\n action=InfoAction,\n nargs=0,\n help='show system information and exit',\n )\n parser.add_argument(\n '--plugin-info',\n action=PluginInfoAction,\n nargs=0,\n help='show information about plugins and exit',\n )\n parser.add_argument(\n '--citation',\n action=CitationAction,\n nargs=0,\n help='show citation information and exit',\n )\n parser.add_argument(\n '--stack',\n action='store_true',\n help='concatenate multiple input files into a single stack.',\n )\n parser.add_argument(\n '--plugin',\n help='specify plugin name when opening a file',\n )\n parser.add_argument(\n '--layer-type',\n metavar=\"TYPE\",\n choices=set(layers.NAMES),\n help=(\n 'force file to be interpreted as a specific layer type. 
'\n f'one of {set(layers.NAMES)}'\n ),\n )\n parser.add_argument(\n '--reset',\n action='store_true',\n help='reset settings to default values.',\n )\n parser.add_argument(\n '--settings-path',\n type=Path,\n help='use specific path to store and load settings.',\n )\n\n args, unknown = parser.parse_known_args()\n # this is a hack to allow using \"=\" as a key=value separator while also\n # allowing nargs='*' on the \"paths\" argument...\n for idx, item in enumerate(reversed(args.paths)):\n if item.startswith(\"--\"):\n unknown.append(args.paths.pop(len(args.paths) - idx - 1))\n kwargs = validate_unknown_args(unknown) if unknown else {}\n\n return args, kwargs\n\n\ndef _run():\n from napari import run, view_path\n from napari.settings import get_settings\n\n \"\"\"Main program.\"\"\"\n args, kwargs = parse_sys_argv()\n\n # parse -v flags and set the appropriate logging level\n levels = [logging.WARNING, logging.INFO, logging.DEBUG]\n level = levels[min(2, args.verbose)] # prevent index error\n logging.basicConfig(\n level=level,\n format=\"%(asctime)s %(levelname)s %(message)s\",\n datefmt='%H:%M:%S',\n )\n\n if args.reset:\n if args.settings_path:\n settings = get_settings(path=args.settings_path)\n else:\n settings = get_settings()\n settings.reset()\n settings.save()\n sys.exit(\"Resetting settings to default values.\\n\")\n\n if args.plugin:\n # make sure plugin is only used when files are specified\n if not args.paths:\n sys.exit(\n \"error: The '--plugin' argument is only valid \"\n \"when providing a file name\"\n )\n # I *think* that Qt is looking in sys.argv for a flag `--plugins`,\n # which emits \"WARNING: No such plugin for spec 'builtins'\"\n # so remove --plugin from sys.argv to prevent that warningz\n sys.argv.remove('--plugin')\n\n if any(p.endswith('.py') for p in args.paths):\n # we're running a script\n if len(args.paths) > 1:\n sys.exit(\n 'When providing a python script, only a '\n 'single positional argument may be provided'\n )\n\n # run the file\n mod = runpy.run_path(args.paths[0])\n\n from napari_plugin_engine.markers import HookImplementationMarker\n\n # if this file had any hook implementations, register and run as plugin\n if any(isinstance(i, HookImplementationMarker) for i in mod.values()):\n _run_plugin_module(mod, os.path.basename(args.paths[0]))\n\n else:\n if args.with_:\n from .plugins import plugin_manager\n\n # if a plugin widget has been requested, this will fail immediately\n # if the requested plugin/widget is not available.\n plugin_manager.discover_widgets()\n pname, *wnames = args.with_\n if wnames:\n for wname in wnames:\n plugin_manager.get_widget(pname, wname)\n else:\n plugin_manager.get_widget(pname)\n\n from napari._qt.widgets.qt_splash_screen import NapariSplashScreen\n\n splash = NapariSplashScreen()\n splash.close() # will close once event loop starts\n\n # viewer is unused but _must_ be kept around.\n # it will be referenced by the global window only\n # once napari has finished starting\n # but in the meantime if the garbage collector runs;\n # it will collect it and hang napari at start time.\n # in a way that is machine, os, time (and likely weather dependant).\n viewer = view_path( # noqa: F841\n args.paths,\n stack=args.stack,\n plugin=args.plugin,\n layer_type=args.layer_type,\n **kwargs,\n )\n\n if args.with_:\n pname, *wnames = args.with_\n if wnames:\n for wname in wnames:\n viewer.window.add_plugin_dock_widget(pname, wname)\n else:\n viewer.window.add_plugin_dock_widget(pname)\n\n run(gui_exceptions=True)\n\n\ndef 
_run_plugin_module(mod, plugin_name):\n \"\"\"Register `mod` as a plugin, find/create viewer, and run napari.\"\"\"\n from napari import Viewer, run\n from napari.plugins import plugin_manager\n\n plugin_manager.register(mod, name=plugin_name)\n\n # now, check if a viewer was created, and if not, create one.\n for obj in mod.values():\n if isinstance(obj, Viewer):\n _v = obj\n break\n else:\n _v = Viewer()\n\n try:\n _v.window._qt_window.parent()\n except RuntimeError:\n # this script had a napari.run() in it, and the viewer has already been\n # used and cleaned up... if we eventually have \"reusable viewers\", we\n # can continue here\n return\n\n # finally, if the file declared a dock widget, add it to the viewer.\n dws = plugin_manager.hooks.napari_experimental_provide_dock_widget\n if any(i.plugin_name == plugin_name for i in dws.get_hookimpls()):\n _v.window.add_plugin_dock_widget(plugin_name)\n\n run()\n\n\ndef _run_pythonw(python_path):\n \"\"\"Execute this script again through pythonw.\n\n This can be used to ensure we're using a framework\n build of Python on macOS, which fixes frozen menubar issues.\n\n Parameters\n ----------\n python_path : pathlib.Path\n Path to python framework build.\n \"\"\"\n import subprocess\n\n cwd = Path.cwd()\n cmd = [python_path, '-m', 'napari']\n env = os.environ.copy()\n\n # Append command line arguments.\n if len(sys.argv) > 1:\n cmd.extend(sys.argv[1:])\n\n result = subprocess.run(cmd, env=env, cwd=cwd)\n sys.exit(result.returncode)\n\n\ndef main():\n # Ensure we're always using a \"framework build\" on the latest\n # macOS to ensure menubar works without needing to refocus napari.\n # We try this for macOS later than the Catelina release\n # See https://github.com/napari/napari/pull/1554 and\n # https://github.com/napari/napari/issues/380#issuecomment-659656775\n # and https://github.com/ContinuumIO/anaconda-issues/issues/199\n import platform\n from distutils.version import StrictVersion\n\n _MACOS_AT_LEAST_CATALINA = sys.platform == \"darwin\" and StrictVersion(\n platform.release()\n ) > StrictVersion('19.0.0')\n _MACOS_AT_LEAST_BIG_SUR = sys.platform == \"darwin\" and StrictVersion(\n platform.release()\n ) > StrictVersion('20.0.0')\n\n _RUNNING_CONDA = \"CONDA_PREFIX\" in os.environ\n _RUNNING_PYTHONW = \"PYTHONEXECUTABLE\" in os.environ\n\n # quick fix for Big Sur py3.9\n if _MACOS_AT_LEAST_BIG_SUR:\n os.environ['QT_MAC_WANTS_LAYER'] = '1'\n\n if _MACOS_AT_LEAST_CATALINA and _RUNNING_CONDA and not _RUNNING_PYTHONW:\n python_path = Path(sys.exec_prefix) / 'bin' / 'pythonw'\n\n if python_path.exists():\n # Running again with pythonw will exit this script\n # and use the framework build of python.\n _run_pythonw(python_path)\n else:\n msg = (\n 'pythonw executable not found.\\n'\n 'To unfreeze the menubar on macOS, '\n 'click away from napari to another app, '\n 'then reactivate napari. To avoid this problem, '\n 'please install python.app in conda using:\\n'\n 'conda install -c conda-forge python.app'\n )\n warnings.warn(msg)\n _run()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"path": "napari/__main__.py"
}
] | [
{
"content": "\"\"\"\nnapari command line viewer.\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport runpy\nimport sys\nimport warnings\nfrom ast import literal_eval\nfrom pathlib import Path\nfrom textwrap import wrap\nfrom typing import Any, Dict, List\n\n\nclass InfoAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --info\"\n from napari.utils import sys_info\n\n logging.basicConfig(level=logging.WARNING)\n print(sys_info())\n from .plugins import plugin_manager\n\n plugin_manager.discover_widgets()\n errors = plugin_manager.get_errors()\n if errors:\n names = {e.plugin_name for e in errors}\n print(\"\\n‼️ Errors were detected in the following plugins:\")\n print(\"(Run 'napari --plugin-info -v' for more details)\")\n print(\"\\n\".join(f\" - {n}\" for n in names))\n sys.exit()\n\n\nclass PluginInfoAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --info\"\n logging.basicConfig(level=logging.WARNING)\n from .plugins import plugin_manager\n\n plugin_manager.discover_widgets()\n print(plugin_manager)\n\n errors = plugin_manager.get_errors()\n if errors:\n print(\"‼️ Some errors occurred:\")\n verbose = '-v' in sys.argv or '--verbose' in sys.argv\n if not verbose:\n print(\" (use '-v') to show full tracebacks\")\n print(\"-\" * 38)\n\n for err in errors:\n print(err.plugin_name)\n print(f\" error: {err!r}\")\n print(f\" cause: {err.__cause__!r}\")\n if verbose:\n print(\" traceback:\")\n import traceback\n from textwrap import indent\n\n tb = traceback.format_tb(err.__cause__.__traceback__)\n print(indent(\"\".join(tb), ' '))\n sys.exit()\n\n\nclass CitationAction(argparse.Action):\n def __call__(self, *args, **kwargs):\n # prevent unrelated INFO logs when doing \"napari --citation\"\n from napari.utils import citation_text\n\n logging.basicConfig(level=logging.WARNING)\n print(citation_text)\n sys.exit()\n\n\ndef validate_unknown_args(unknown: List[str]) -> Dict[str, Any]:\n \"\"\"Convert a list of strings into a dict of valid kwargs for add_* methods.\n\n Will exit program if any of the arguments are unrecognized, or are\n malformed. 
Converts string to python type using literal_eval.\n\n Parameters\n ----------\n unknown : List[str]\n a list of strings gathered as \"unknown\" arguments in argparse.\n\n Returns\n -------\n kwargs : Dict[str, Any]\n {key: val} dict suitable for the viewer.add_* methods where ``val``\n is a ``literal_eval`` result, or string.\n \"\"\"\n\n from napari.components.viewer_model import valid_add_kwargs\n\n out: Dict[str, Any] = dict()\n valid = set.union(*valid_add_kwargs().values())\n for i, arg in enumerate(unknown):\n if not arg.startswith(\"--\"):\n continue\n\n if \"=\" in arg:\n key, value = arg.split(\"=\", maxsplit=1)\n else:\n key = arg\n key = key.lstrip('-').replace(\"-\", \"_\")\n\n if key not in valid:\n sys.exit(f\"error: unrecognized arguments: {arg}\")\n\n if \"=\" not in arg:\n try:\n value = unknown[i + 1]\n if value.startswith(\"--\"):\n raise IndexError()\n except IndexError:\n sys.exit(f\"error: argument {arg} expected one argument\")\n try:\n value = literal_eval(value)\n except Exception:\n value = value\n\n out[key] = value\n return out\n\n\ndef parse_sys_argv():\n \"\"\"Parse command line arguments.\"\"\"\n\n from napari import __version__, layers\n from napari.components.viewer_model import valid_add_kwargs\n\n kwarg_options = []\n for layer_type, keys in valid_add_kwargs().items():\n kwarg_options.append(f\" {layer_type.title()}:\")\n keys = {k.replace('_', '-') for k in keys}\n lines = wrap(\", \".join(sorted(keys)), break_on_hyphens=False)\n kwarg_options.extend([f\" {line}\" for line in lines])\n\n parser = argparse.ArgumentParser(\n usage=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\"optional layer-type-specific arguments (precede with '--'):\\n\"\n + \"\\n\".join(kwarg_options),\n )\n parser.add_argument('paths', nargs='*', help='path(s) to view.')\n parser.add_argument(\n '-v',\n '--verbose',\n action='count',\n default=0,\n help=\"increase output verbosity\",\n )\n parser.add_argument(\n '-w',\n '--with',\n dest='with_',\n nargs='+',\n metavar=('PLUGIN_NAME', 'WIDGET_NAME'),\n help=(\n \"open napari with dock widget from specified plugin name.\"\n \"(If plugin provides multiple dock widgets, widget name must also \"\n \"be provided)\"\n ),\n )\n parser.add_argument(\n '--version',\n action='version',\n version=f'napari version {__version__}',\n )\n parser.add_argument(\n '--info',\n action=InfoAction,\n nargs=0,\n help='show system information and exit',\n )\n parser.add_argument(\n '--plugin-info',\n action=PluginInfoAction,\n nargs=0,\n help='show information about plugins and exit',\n )\n parser.add_argument(\n '--citation',\n action=CitationAction,\n nargs=0,\n help='show citation information and exit',\n )\n parser.add_argument(\n '--stack',\n action='store_true',\n help='concatenate multiple input files into a single stack.',\n )\n parser.add_argument(\n '--plugin',\n help='specify plugin name when opening a file',\n )\n parser.add_argument(\n '--layer-type',\n metavar=\"TYPE\",\n choices=set(layers.NAMES),\n help=(\n 'force file to be interpreted as a specific layer type. 
'\n f'one of {set(layers.NAMES)}'\n ),\n )\n parser.add_argument(\n '--reset',\n action='store_true',\n help='reset settings to default values.',\n )\n parser.add_argument(\n '--settings-path',\n type=Path,\n help='use specific path to store and load settings.',\n )\n\n args, unknown = parser.parse_known_args()\n # this is a hack to allow using \"=\" as a key=value separator while also\n # allowing nargs='*' on the \"paths\" argument...\n for idx, item in enumerate(reversed(args.paths)):\n if item.startswith(\"--\"):\n unknown.append(args.paths.pop(len(args.paths) - idx - 1))\n kwargs = validate_unknown_args(unknown) if unknown else {}\n\n return args, kwargs\n\n\ndef _run():\n from napari import run, view_path\n from napari.settings import get_settings\n\n \"\"\"Main program.\"\"\"\n args, kwargs = parse_sys_argv()\n\n # parse -v flags and set the appropriate logging level\n levels = [logging.WARNING, logging.INFO, logging.DEBUG]\n level = levels[min(2, args.verbose)] # prevent index error\n logging.basicConfig(\n level=level,\n format=\"%(asctime)s %(levelname)s %(message)s\",\n datefmt='%H:%M:%S',\n )\n\n if args.reset:\n if args.settings_path:\n settings = get_settings(path=args.settings_path)\n else:\n settings = get_settings()\n settings.reset()\n settings.save()\n sys.exit(\"Resetting settings to default values.\\n\")\n\n if args.plugin:\n # make sure plugin is only used when files are specified\n if not args.paths:\n sys.exit(\n \"error: The '--plugin' argument is only valid \"\n \"when providing a file name\"\n )\n # I *think* that Qt is looking in sys.argv for a flag `--plugins`,\n # which emits \"WARNING: No such plugin for spec 'builtins'\"\n # so remove --plugin from sys.argv to prevent that warningz\n sys.argv.remove('--plugin')\n\n if any(p.endswith('.py') for p in args.paths):\n # we're running a script\n if len(args.paths) > 1:\n sys.exit(\n 'When providing a python script, only a '\n 'single positional argument may be provided'\n )\n\n # run the file\n mod = runpy.run_path(args.paths[0])\n\n from napari_plugin_engine.markers import HookImplementationMarker\n\n # if this file had any hook implementations, register and run as plugin\n if any(isinstance(i, HookImplementationMarker) for i in mod.values()):\n _run_plugin_module(mod, os.path.basename(args.paths[0]))\n\n else:\n if args.with_:\n from .plugins import plugin_manager\n\n # if a plugin widget has been requested, this will fail immediately\n # if the requested plugin/widget is not available.\n plugin_manager.discover_widgets()\n pname, *wnames = args.with_\n if wnames:\n for wname in wnames:\n plugin_manager.get_widget(pname, wname)\n else:\n plugin_manager.get_widget(pname)\n\n from napari._qt.widgets.qt_splash_screen import NapariSplashScreen\n\n splash = NapariSplashScreen()\n splash.close() # will close once event loop starts\n\n # viewer is unused but _must_ be kept around.\n # it will be referenced by the global window only\n # once napari has finished starting\n # but in the meantime if the garbage collector runs;\n # it will collect it and hang napari at start time.\n # in a way that is machine, os, time (and likely weather dependant).\n viewer = view_path( # noqa: F841\n args.paths,\n stack=args.stack,\n plugin=args.plugin,\n layer_type=args.layer_type,\n **kwargs,\n )\n\n if args.with_:\n pname, *wnames = args.with_\n if wnames:\n for wname in wnames:\n viewer.window.add_plugin_dock_widget(pname, wname)\n else:\n viewer.window.add_plugin_dock_widget(pname)\n\n run(gui_exceptions=True)\n\n\ndef 
_run_plugin_module(mod, plugin_name):\n \"\"\"Register `mod` as a plugin, find/create viewer, and run napari.\"\"\"\n from napari import Viewer, run\n from napari.plugins import plugin_manager\n\n plugin_manager.register(mod, name=plugin_name)\n\n # now, check if a viewer was created, and if not, create one.\n for obj in mod.values():\n if isinstance(obj, Viewer):\n _v = obj\n break\n else:\n _v = Viewer()\n\n try:\n _v.window._qt_window.parent()\n except RuntimeError:\n # this script had a napari.run() in it, and the viewer has already been\n # used and cleaned up... if we eventually have \"reusable viewers\", we\n # can continue here\n return\n\n # finally, if the file declared a dock widget, add it to the viewer.\n dws = plugin_manager.hooks.napari_experimental_provide_dock_widget\n if any(i.plugin_name == plugin_name for i in dws.get_hookimpls()):\n _v.window.add_plugin_dock_widget(plugin_name)\n\n run()\n\n\ndef _run_pythonw(python_path):\n \"\"\"Execute this script again through pythonw.\n\n This can be used to ensure we're using a framework\n build of Python on macOS, which fixes frozen menubar issues.\n\n Parameters\n ----------\n python_path : pathlib.Path\n Path to python framework build.\n \"\"\"\n import subprocess\n\n cwd = Path.cwd()\n cmd = [python_path, '-m', 'napari']\n env = os.environ.copy()\n\n # Append command line arguments.\n if len(sys.argv) > 1:\n cmd.extend(sys.argv[1:])\n\n result = subprocess.run(cmd, env=env, cwd=cwd)\n sys.exit(result.returncode)\n\n\ndef main():\n # Ensure we're always using a \"framework build\" on the latest\n # macOS to ensure menubar works without needing to refocus napari.\n # We try this for macOS later than the Catelina release\n # See https://github.com/napari/napari/pull/1554 and\n # https://github.com/napari/napari/issues/380#issuecomment-659656775\n # and https://github.com/ContinuumIO/anaconda-issues/issues/199\n import platform\n from distutils.version import StrictVersion\n\n _MACOS_AT_LEAST_CATALINA = sys.platform == \"darwin\" and StrictVersion(\n platform.release()\n ) > StrictVersion('19.0.0')\n _MACOS_AT_LEAST_BIG_SUR = sys.platform == \"darwin\" and StrictVersion(\n platform.release()\n ) > StrictVersion('20.0.0')\n\n _RUNNING_CONDA = \"CONDA_PREFIX\" in os.environ\n _RUNNING_PYTHONW = \"PYTHONEXECUTABLE\" in os.environ\n\n # quick fix for Big Sur py3.9\n if _MACOS_AT_LEAST_BIG_SUR:\n os.environ['QT_MAC_WANTS_LAYER'] = '1'\n\n if _MACOS_AT_LEAST_CATALINA and _RUNNING_CONDA and not _RUNNING_PYTHONW:\n python_path = Path(sys.exec_prefix) / 'bin' / 'pythonw'\n\n if python_path.exists():\n # Running again with pythonw will exit this script\n # and use the framework build of python.\n _run_pythonw(python_path)\n else:\n msg = (\n 'pythonw executable not found.\\n'\n 'To unfreeze the menubar on macOS, '\n 'click away from napari to another app, '\n 'then reactivate napari. To avoid this problem, '\n 'please install python.app in conda using:\\n'\n 'conda install -c conda-forge python.app'\n )\n warnings.warn(msg)\n\n # Prevent https://github.com/napari/napari/issues/3415\n if sys.platform == \"darwin\" and sys.version_info >= (3, 8):\n import multiprocessing\n\n multiprocessing.set_start_method('fork')\n\n _run()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"path": "napari/__main__.py"
}
] | diff --git a/napari/__main__.py b/napari/__main__.py
index ffa09c27a1e..802e0f2d79e 100644
--- a/napari/__main__.py
+++ b/napari/__main__.py
@@ -421,6 +421,13 @@ def main():
'conda install -c conda-forge python.app'
)
warnings.warn(msg)
+
+ # Prevent https://github.com/napari/napari/issues/3415
+ if sys.platform == "darwin" and sys.version_info >= (3, 8):
+ import multiprocessing
+
+ multiprocessing.set_start_method('fork')
+
_run()
|
Pylons__pyramid-3677 | threading.Thread.setDaemon has been deprecated in favor of setting the daemon attribute directly in Python 3.10
https://github.com/Pylons/pyramid/blob/8061fce297cc7117d3e6e2b39e47512c7db2904f/src/pyramid/scripts/pserve.py#L234
Ref : python/cpython#25174
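
For reference, the replacement idiom is just an attribute assignment; a minimal sketch (the `open_browser` body here is a stand-in, not the actual pserve code):

```python
import threading
import time


def open_browser():
    time.sleep(1)
    print("would open the browser here")


t = threading.Thread(target=open_browser)
# Deprecated since Python 3.10 and warns at runtime:
#   t.setDaemon(True)
# Preferred spelling, available on every supported Python version:
t.daemon = True
t.start()
t.join()
```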
| [
{
"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# (http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n#\n# For discussion of daemonizing:\n# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731\n#\n# Code taken also from QP: http://www.mems-exchange.org/software/qp/ From\n# lib/site.py\n\nimport argparse\nimport hupper\nimport os\nimport re\nimport sys\nimport textwrap\nimport threading\nimport time\nimport webbrowser\n\nfrom pyramid.path import AssetResolver\nfrom pyramid.scripts.common import get_config_loader, parse_vars\nfrom pyramid.settings import aslist\n\n\ndef main(argv=sys.argv, quiet=False, original_ignore_files=None):\n command = PServeCommand(\n argv, quiet=quiet, original_ignore_files=original_ignore_files\n )\n return command.run()\n\n\nclass PServeCommand:\n\n description = \"\"\"\\\n This command serves a web application that uses a PasteDeploy\n configuration file for the server and application.\n\n You can also include variable assignments like 'http_port=8080'\n and then use %(http_port)s in your config files.\n \"\"\"\n default_verbosity = 1\n\n parser = argparse.ArgumentParser(\n description=textwrap.dedent(description),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n '-n',\n '--app-name',\n dest='app_name',\n metavar='NAME',\n help=\"Load the named application (default main)\",\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n metavar='SERVER_TYPE',\n help=\"Use the named server.\",\n )\n parser.add_argument(\n '--server-name',\n dest='server_name',\n metavar='SECTION_NAME',\n help=(\n \"Use the named server as defined in the configuration file \"\n \"(default: main)\"\n ),\n )\n parser.add_argument(\n '--reload',\n dest='reload',\n action='store_true',\n help=\"Use auto-restart file monitor\",\n )\n parser.add_argument(\n '--reload-interval',\n dest='reload_interval',\n default=1,\n help=(\n \"Seconds between checking files (low number can cause \"\n \"significant CPU usage)\"\n ),\n )\n parser.add_argument(\n '-b',\n '--browser',\n dest='browser',\n action='store_true',\n help=(\n \"Open a web browser to the server url. The server url is \"\n \"determined from the 'open_url' setting in the 'pserve' \"\n \"section of the configuration file.\"\n ),\n )\n parser.add_argument(\n '-v',\n '--verbose',\n default=default_verbosity,\n dest='verbose',\n action='count',\n help=\"Set verbose level (default \" + str(default_verbosity) + \")\",\n )\n parser.add_argument(\n '-q',\n '--quiet',\n action='store_const',\n const=0,\n dest='verbose',\n help=\"Suppress verbose output\",\n )\n parser.add_argument(\n 'config_uri',\n nargs='?',\n default=None,\n help='The URI to the configuration file.',\n )\n parser.add_argument(\n 'config_vars',\n nargs='*',\n default=(),\n help=\"Variables required by the config file. 
For example, \"\n \"`http_port=%%(http_port)s` would expect `http_port=8080` to be \"\n \"passed here.\",\n )\n\n _get_config_loader = staticmethod(get_config_loader) # for testing\n\n open_url = None\n\n _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)\n\n def __init__(self, argv, quiet=False, original_ignore_files=None):\n self.args = self.parser.parse_args(argv[1:])\n if quiet:\n self.args.verbose = 0\n if self.args.reload:\n self.worker_kwargs = {'argv': argv, \"quiet\": quiet}\n self.watch_files = set()\n self.ignore_files = set()\n self.original_ignore_files = original_ignore_files\n\n def out(self, msg): # pragma: no cover\n if self.args.verbose > 0:\n print(msg, file=sys.stderr)\n\n def get_config_path(self, loader):\n return os.path.abspath(loader.uri.path)\n\n def pserve_file_config(self, loader, global_conf=None):\n settings = loader.get_settings('pserve', global_conf)\n config_path = self.get_config_path(loader)\n here = os.path.dirname(config_path)\n watch_files = aslist(settings.get('watch_files', ''), flatten=False)\n self.ignore_files = set(\n aslist(settings.get('ignore_files', ''), flatten=False)\n )\n\n # track file paths relative to the ini file\n resolver = AssetResolver(package=None)\n for file in watch_files:\n if ':' in file:\n file = resolver.resolve(file).abspath()\n elif not os.path.isabs(file):\n file = os.path.join(here, file)\n self.watch_files.add(os.path.abspath(file))\n\n # attempt to determine the url of the server\n open_url = settings.get('open_url')\n if open_url:\n self.open_url = open_url\n\n def guess_server_url(self, loader, server_name, global_conf=None):\n server_name = server_name or 'main'\n settings = loader.get_settings('server:' + server_name, global_conf)\n if 'port' in settings:\n return 'http://127.0.0.1:{port}'.format(**settings)\n\n def run(self): # pragma: no cover\n if not self.args.config_uri:\n self.out('You must give a config file')\n return 2\n config_uri = self.args.config_uri\n config_vars = parse_vars(self.args.config_vars)\n app_spec = self.args.config_uri\n app_name = self.args.app_name\n\n loader = self._get_config_loader(config_uri)\n\n # setup logging only in the worker process incase the logging config\n # opens files which should not be opened by multiple processes at once\n if not self.args.reload or hupper.is_active():\n loader.setup_logging(config_vars)\n\n self.pserve_file_config(loader, global_conf=config_vars)\n\n server_name = self.args.server_name\n if self.args.server:\n server_spec = 'egg:pyramid'\n assert server_name is None\n server_name = self.args.server\n else:\n server_spec = app_spec\n\n server_loader = loader\n if server_spec != app_spec:\n server_loader = self.get_config_loader(server_spec)\n\n # do not open the browser on each reload so check hupper first\n if self.args.browser and not hupper.is_active():\n url = self.open_url\n\n if not url:\n url = self.guess_server_url(\n server_loader, server_name, config_vars\n )\n\n if not url:\n self.out(\n 'WARNING: could not determine the server\\'s url to '\n 'open the browser. 
To fix this set the \"open_url\" '\n 'setting in the [pserve] section of the '\n 'configuration file.'\n )\n\n else:\n\n def open_browser():\n time.sleep(1)\n webbrowser.open(url)\n\n t = threading.Thread(target=open_browser)\n t.setDaemon(True)\n t.start()\n\n if self.args.reload and not hupper.is_active():\n if self.args.verbose > 1:\n self.out('Running reloading file monitor')\n self.worker_kwargs['original_ignore_files'] = self.ignore_files\n hupper.start_reloader(\n 'pyramid.scripts.pserve.main',\n reload_interval=int(self.args.reload_interval),\n verbose=self.args.verbose,\n worker_kwargs=self.worker_kwargs,\n ignore_files=self.ignore_files,\n )\n return 0\n\n config_path = self.get_config_path(loader)\n self.watch_files.add(config_path)\n\n server_path = self.get_config_path(server_loader)\n self.watch_files.add(server_path)\n\n if hupper.is_active():\n reloader = hupper.get_reloader()\n reloader.watch_files(list(self.watch_files))\n\n if (\n self.original_ignore_files is not None\n and self.original_ignore_files != self.ignore_files\n ):\n self.out(\n 'A change to \"ignore_files\" was detected but it will not take'\n ' effect until pserve is restarted.'\n )\n\n server = server_loader.get_wsgi_server(server_name, config_vars)\n\n app = loader.get_wsgi_app(app_name, config_vars)\n\n if self.args.verbose > 0:\n if hasattr(os, 'getpid'):\n msg = 'Starting server in PID %i.' % os.getpid()\n else:\n msg = 'Starting server.'\n self.out(msg)\n\n try:\n server(app)\n except (SystemExit, KeyboardInterrupt) as e:\n if self.args.verbose > 1:\n raise\n if str(e):\n msg = ' ' + str(e)\n else:\n msg = ''\n self.out('Exiting%s (-v to see traceback)' % msg)\n\n\n# For paste.deploy server instantiation (egg:pyramid#wsgiref)\ndef wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover\n from wsgiref.simple_server import make_server\n\n host = kw.get('host', '0.0.0.0')\n port = int(kw.get('port', 8080))\n server = make_server(host, port, wsgi_app)\n print(\n 'Starting HTTP server on http://%s:%s' % (host, port), file=sys.stderr\n )\n server.serve_forever()\n\n\n# For paste.deploy server instantiation (egg:pyramid#cherrypy)\ndef cherrypy_server_runner(\n app,\n global_conf=None,\n host='127.0.0.1',\n port=None,\n ssl_pem=None,\n protocol_version=None,\n numthreads=None,\n server_name=None,\n max=None,\n request_queue_size=None,\n timeout=None,\n): # pragma: no cover\n \"\"\"\n Entry point for CherryPy's WSGI server\n\n Serves the specified WSGI app via CherryPyWSGIServer.\n\n ``app``\n\n The WSGI 'application callable'; multiple WSGI applications\n may be passed as (script_name, callable) pairs.\n\n ``host``\n\n This is the ipaddress to bind to (or a hostname if your\n nameserver is properly configured). This defaults to\n 127.0.0.1, which is not a public interface.\n\n ``port``\n\n The port to run on, defaults to 8080 for HTTP, or 4443 for\n HTTPS. 
This can be a string or an integer value.\n\n ``ssl_pem``\n\n This an optional SSL certificate file (via OpenSSL) You can\n generate a self-signed test PEM certificate file as follows:\n\n $ openssl genrsa 1024 > host.key\n $ chmod 400 host.key\n $ openssl req -new -x509 -nodes -sha1 -days 365 \\\\\n -key host.key > host.cert\n $ cat host.cert host.key > host.pem\n $ chmod 400 host.pem\n\n ``protocol_version``\n\n The protocol used by the server, by default ``HTTP/1.1``.\n\n ``numthreads``\n\n The number of worker threads to create.\n\n ``server_name``\n\n The string to set for WSGI's SERVER_NAME environ entry.\n\n ``max``\n\n The maximum number of queued requests. (defaults to -1 = no\n limit).\n\n ``request_queue_size``\n\n The 'backlog' argument to socket.listen(); specifies the\n maximum number of queued connections.\n\n ``timeout``\n\n The timeout in seconds for accepted connections.\n \"\"\"\n is_ssl = False\n if ssl_pem:\n port = port or 4443\n is_ssl = True\n\n if not port:\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n port = 8080\n bind_addr = (host, int(port))\n\n kwargs = {}\n for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):\n var = locals()[var_name]\n if var is not None:\n kwargs[var_name] = int(var)\n\n try:\n from cheroot.wsgi import Server as WSGIServer\n except ImportError:\n from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer\n\n server = WSGIServer(bind_addr, app, server_name=server_name, **kwargs)\n if ssl_pem is not None:\n # creates wsgiserver.ssl_builtin as side-effect\n try:\n from cheroot.server import get_ssl_adapter_class\n from cheroot.ssl.builtin import BuiltinSSLAdapter\n except ImportError:\n from cherrypy.wsgiserver import get_ssl_adapter_class\n from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter\n get_ssl_adapter_class()\n server.ssl_adapter = BuiltinSSLAdapter(ssl_pem, ssl_pem)\n\n if protocol_version:\n server.protocol = protocol_version\n\n try:\n protocol = is_ssl and 'https' or 'http'\n if host == '0.0.0.0':\n print(\n 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s'\n % (port, protocol, port),\n file=sys.stderr,\n )\n else:\n print(\n 'serving on %s://%s:%s' % (protocol, host, port),\n file=sys.stderr,\n )\n server.start()\n except (KeyboardInterrupt, SystemExit):\n server.stop()\n\n return server\n\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n",
"path": "src/pyramid/scripts/pserve.py"
}
] | [
{
"content": "# (c) 2005 Ian Bicking and contributors; written for Paste\n# (http://pythonpaste.org) Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license.php\n#\n# For discussion of daemonizing:\n# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731\n#\n# Code taken also from QP: http://www.mems-exchange.org/software/qp/ From\n# lib/site.py\n\nimport argparse\nimport hupper\nimport os\nimport re\nimport sys\nimport textwrap\nimport threading\nimport time\nimport webbrowser\n\nfrom pyramid.path import AssetResolver\nfrom pyramid.scripts.common import get_config_loader, parse_vars\nfrom pyramid.settings import aslist\n\n\ndef main(argv=sys.argv, quiet=False, original_ignore_files=None):\n command = PServeCommand(\n argv, quiet=quiet, original_ignore_files=original_ignore_files\n )\n return command.run()\n\n\nclass PServeCommand:\n\n description = \"\"\"\\\n This command serves a web application that uses a PasteDeploy\n configuration file for the server and application.\n\n You can also include variable assignments like 'http_port=8080'\n and then use %(http_port)s in your config files.\n \"\"\"\n default_verbosity = 1\n\n parser = argparse.ArgumentParser(\n description=textwrap.dedent(description),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n '-n',\n '--app-name',\n dest='app_name',\n metavar='NAME',\n help=\"Load the named application (default main)\",\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n metavar='SERVER_TYPE',\n help=\"Use the named server.\",\n )\n parser.add_argument(\n '--server-name',\n dest='server_name',\n metavar='SECTION_NAME',\n help=(\n \"Use the named server as defined in the configuration file \"\n \"(default: main)\"\n ),\n )\n parser.add_argument(\n '--reload',\n dest='reload',\n action='store_true',\n help=\"Use auto-restart file monitor\",\n )\n parser.add_argument(\n '--reload-interval',\n dest='reload_interval',\n default=1,\n help=(\n \"Seconds between checking files (low number can cause \"\n \"significant CPU usage)\"\n ),\n )\n parser.add_argument(\n '-b',\n '--browser',\n dest='browser',\n action='store_true',\n help=(\n \"Open a web browser to the server url. The server url is \"\n \"determined from the 'open_url' setting in the 'pserve' \"\n \"section of the configuration file.\"\n ),\n )\n parser.add_argument(\n '-v',\n '--verbose',\n default=default_verbosity,\n dest='verbose',\n action='count',\n help=\"Set verbose level (default \" + str(default_verbosity) + \")\",\n )\n parser.add_argument(\n '-q',\n '--quiet',\n action='store_const',\n const=0,\n dest='verbose',\n help=\"Suppress verbose output\",\n )\n parser.add_argument(\n 'config_uri',\n nargs='?',\n default=None,\n help='The URI to the configuration file.',\n )\n parser.add_argument(\n 'config_vars',\n nargs='*',\n default=(),\n help=\"Variables required by the config file. 
For example, \"\n \"`http_port=%%(http_port)s` would expect `http_port=8080` to be \"\n \"passed here.\",\n )\n\n _get_config_loader = staticmethod(get_config_loader) # for testing\n\n open_url = None\n\n _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)\n\n def __init__(self, argv, quiet=False, original_ignore_files=None):\n self.args = self.parser.parse_args(argv[1:])\n if quiet:\n self.args.verbose = 0\n if self.args.reload:\n self.worker_kwargs = {'argv': argv, \"quiet\": quiet}\n self.watch_files = set()\n self.ignore_files = set()\n self.original_ignore_files = original_ignore_files\n\n def out(self, msg): # pragma: no cover\n if self.args.verbose > 0:\n print(msg, file=sys.stderr)\n\n def get_config_path(self, loader):\n return os.path.abspath(loader.uri.path)\n\n def pserve_file_config(self, loader, global_conf=None):\n settings = loader.get_settings('pserve', global_conf)\n config_path = self.get_config_path(loader)\n here = os.path.dirname(config_path)\n watch_files = aslist(settings.get('watch_files', ''), flatten=False)\n self.ignore_files = set(\n aslist(settings.get('ignore_files', ''), flatten=False)\n )\n\n # track file paths relative to the ini file\n resolver = AssetResolver(package=None)\n for file in watch_files:\n if ':' in file:\n file = resolver.resolve(file).abspath()\n elif not os.path.isabs(file):\n file = os.path.join(here, file)\n self.watch_files.add(os.path.abspath(file))\n\n # attempt to determine the url of the server\n open_url = settings.get('open_url')\n if open_url:\n self.open_url = open_url\n\n def guess_server_url(self, loader, server_name, global_conf=None):\n server_name = server_name or 'main'\n settings = loader.get_settings('server:' + server_name, global_conf)\n if 'port' in settings:\n return 'http://127.0.0.1:{port}'.format(**settings)\n\n def run(self): # pragma: no cover\n if not self.args.config_uri:\n self.out('You must give a config file')\n return 2\n config_uri = self.args.config_uri\n config_vars = parse_vars(self.args.config_vars)\n app_spec = self.args.config_uri\n app_name = self.args.app_name\n\n loader = self._get_config_loader(config_uri)\n\n # setup logging only in the worker process incase the logging config\n # opens files which should not be opened by multiple processes at once\n if not self.args.reload or hupper.is_active():\n loader.setup_logging(config_vars)\n\n self.pserve_file_config(loader, global_conf=config_vars)\n\n server_name = self.args.server_name\n if self.args.server:\n server_spec = 'egg:pyramid'\n assert server_name is None\n server_name = self.args.server\n else:\n server_spec = app_spec\n\n server_loader = loader\n if server_spec != app_spec:\n server_loader = self.get_config_loader(server_spec)\n\n # do not open the browser on each reload so check hupper first\n if self.args.browser and not hupper.is_active():\n url = self.open_url\n\n if not url:\n url = self.guess_server_url(\n server_loader, server_name, config_vars\n )\n\n if not url:\n self.out(\n 'WARNING: could not determine the server\\'s url to '\n 'open the browser. 
To fix this set the \"open_url\" '\n 'setting in the [pserve] section of the '\n 'configuration file.'\n )\n\n else:\n\n def open_browser():\n time.sleep(1)\n webbrowser.open(url)\n\n t = threading.Thread(target=open_browser)\n t.daemon = True\n t.start()\n\n if self.args.reload and not hupper.is_active():\n if self.args.verbose > 1:\n self.out('Running reloading file monitor')\n self.worker_kwargs['original_ignore_files'] = self.ignore_files\n hupper.start_reloader(\n 'pyramid.scripts.pserve.main',\n reload_interval=int(self.args.reload_interval),\n verbose=self.args.verbose,\n worker_kwargs=self.worker_kwargs,\n ignore_files=self.ignore_files,\n )\n return 0\n\n config_path = self.get_config_path(loader)\n self.watch_files.add(config_path)\n\n server_path = self.get_config_path(server_loader)\n self.watch_files.add(server_path)\n\n if hupper.is_active():\n reloader = hupper.get_reloader()\n reloader.watch_files(list(self.watch_files))\n\n if (\n self.original_ignore_files is not None\n and self.original_ignore_files != self.ignore_files\n ):\n self.out(\n 'A change to \"ignore_files\" was detected but it will not take'\n ' effect until pserve is restarted.'\n )\n\n server = server_loader.get_wsgi_server(server_name, config_vars)\n\n app = loader.get_wsgi_app(app_name, config_vars)\n\n if self.args.verbose > 0:\n if hasattr(os, 'getpid'):\n msg = 'Starting server in PID %i.' % os.getpid()\n else:\n msg = 'Starting server.'\n self.out(msg)\n\n try:\n server(app)\n except (SystemExit, KeyboardInterrupt) as e:\n if self.args.verbose > 1:\n raise\n if str(e):\n msg = ' ' + str(e)\n else:\n msg = ''\n self.out('Exiting%s (-v to see traceback)' % msg)\n\n\n# For paste.deploy server instantiation (egg:pyramid#wsgiref)\ndef wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover\n from wsgiref.simple_server import make_server\n\n host = kw.get('host', '0.0.0.0')\n port = int(kw.get('port', 8080))\n server = make_server(host, port, wsgi_app)\n print(\n 'Starting HTTP server on http://%s:%s' % (host, port), file=sys.stderr\n )\n server.serve_forever()\n\n\n# For paste.deploy server instantiation (egg:pyramid#cherrypy)\ndef cherrypy_server_runner(\n app,\n global_conf=None,\n host='127.0.0.1',\n port=None,\n ssl_pem=None,\n protocol_version=None,\n numthreads=None,\n server_name=None,\n max=None,\n request_queue_size=None,\n timeout=None,\n): # pragma: no cover\n \"\"\"\n Entry point for CherryPy's WSGI server\n\n Serves the specified WSGI app via CherryPyWSGIServer.\n\n ``app``\n\n The WSGI 'application callable'; multiple WSGI applications\n may be passed as (script_name, callable) pairs.\n\n ``host``\n\n This is the ipaddress to bind to (or a hostname if your\n nameserver is properly configured). This defaults to\n 127.0.0.1, which is not a public interface.\n\n ``port``\n\n The port to run on, defaults to 8080 for HTTP, or 4443 for\n HTTPS. 
This can be a string or an integer value.\n\n ``ssl_pem``\n\n This an optional SSL certificate file (via OpenSSL) You can\n generate a self-signed test PEM certificate file as follows:\n\n $ openssl genrsa 1024 > host.key\n $ chmod 400 host.key\n $ openssl req -new -x509 -nodes -sha1 -days 365 \\\\\n -key host.key > host.cert\n $ cat host.cert host.key > host.pem\n $ chmod 400 host.pem\n\n ``protocol_version``\n\n The protocol used by the server, by default ``HTTP/1.1``.\n\n ``numthreads``\n\n The number of worker threads to create.\n\n ``server_name``\n\n The string to set for WSGI's SERVER_NAME environ entry.\n\n ``max``\n\n The maximum number of queued requests. (defaults to -1 = no\n limit).\n\n ``request_queue_size``\n\n The 'backlog' argument to socket.listen(); specifies the\n maximum number of queued connections.\n\n ``timeout``\n\n The timeout in seconds for accepted connections.\n \"\"\"\n is_ssl = False\n if ssl_pem:\n port = port or 4443\n is_ssl = True\n\n if not port:\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n port = 8080\n bind_addr = (host, int(port))\n\n kwargs = {}\n for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):\n var = locals()[var_name]\n if var is not None:\n kwargs[var_name] = int(var)\n\n try:\n from cheroot.wsgi import Server as WSGIServer\n except ImportError:\n from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer\n\n server = WSGIServer(bind_addr, app, server_name=server_name, **kwargs)\n if ssl_pem is not None:\n # creates wsgiserver.ssl_builtin as side-effect\n try:\n from cheroot.server import get_ssl_adapter_class\n from cheroot.ssl.builtin import BuiltinSSLAdapter\n except ImportError:\n from cherrypy.wsgiserver import get_ssl_adapter_class\n from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter\n get_ssl_adapter_class()\n server.ssl_adapter = BuiltinSSLAdapter(ssl_pem, ssl_pem)\n\n if protocol_version:\n server.protocol = protocol_version\n\n try:\n protocol = is_ssl and 'https' or 'http'\n if host == '0.0.0.0':\n print(\n 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s'\n % (port, protocol, port),\n file=sys.stderr,\n )\n else:\n print(\n 'serving on %s://%s:%s' % (protocol, host, port),\n file=sys.stderr,\n )\n server.start()\n except (KeyboardInterrupt, SystemExit):\n server.stop()\n\n return server\n\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n",
"path": "src/pyramid/scripts/pserve.py"
}
] | diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index d527b1a042..96fb6fd76f 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -353,4 +353,6 @@ Contributors
- Sergey Maranchuk, 2020/04/18
-- Thibault Ravera, 2020/06/03
\ No newline at end of file
+- Thibault Ravera, 2020/06/03
+
+- Karthikeyan Singaravelan, 2021/08/24
diff --git a/src/pyramid/scripts/pserve.py b/src/pyramid/scripts/pserve.py
index 6906a0410f..1bcf6c543e 100644
--- a/src/pyramid/scripts/pserve.py
+++ b/src/pyramid/scripts/pserve.py
@@ -231,7 +231,7 @@ def open_browser():
webbrowser.open(url)
t = threading.Thread(target=open_browser)
- t.setDaemon(True)
+ t.daemon = True
t.start()
if self.args.reload and not hupper.is_active():
|
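The pserve diff above swaps the deprecated `Thread.setDaemon(True)` call for the `daemon` attribute. As a hedged, standalone illustration of the modern idiom (not Pyramid code, just the pattern the diff adopts):

```python
import threading
import time

def open_browser_later():
    # Stand-in for the real work; pserve opens a web browser here.
    time.sleep(1)

t = threading.Thread(target=open_browser_later)
t.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
t.start()
```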
gratipay__gratipay.com-2792 | broken facebook link when no user_name
If all we have is a user_id, we construct the URL improperly. In that case we need:
`http://facebook.com/profile.php?id=$ID`
But we have:
`http://facebook.com/None`
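The diff for this record simply switches the `account_url` template to Facebook's id-based form. As a hedged standalone sketch of the same idea (a hypothetical helper, not Gratipay's actual code):

```python
def facebook_account_url(user_name, user_id):
    # Prefer the vanity URL when a user_name exists; otherwise fall back to
    # the id-based form so we never build "https://www.facebook.com/None".
    if user_name:
        return 'https://www.facebook.com/{}'.format(user_name)
    return 'https://www.facebook.com/profile.php?id={}'.format(user_id)
```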
| [
{
"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n",
"path": "gratipay/elsewhere/facebook.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n",
"path": "gratipay/elsewhere/facebook.py"
}
] | diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py
index f69328bffd..fe6fba15c8 100644
--- a/gratipay/elsewhere/facebook.py
+++ b/gratipay/elsewhere/facebook.py
@@ -9,7 +9,7 @@ class Facebook(PlatformOAuth2):
# Platform attributes
name = 'facebook'
display_name = 'Facebook'
- account_url = 'https://www.facebook.com/{user_name}'
+ account_url = 'https://www.facebook.com/profile.php?id={user_id}'
# Auth attributes
auth_url = 'https://www.facebook.com/dialog/oauth'
|
pydantic__pydantic-6364 | pydantic.v1.parse_obj_as internally uses pydantic.main.create_model instead of pydantic.v1.main.create_model
### Initial Checks
- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent
### Description
I was trying to migrate my codebase from V1 to V2 (mostly by replacing `import pydantic` with `import pydantic.v1`) and noticed that `pydantic.v1.parse_obj_as` was not working as intended and was leading to the following error:
```
Traceback (most recent call last):
File "/Users/sharathhuddar/workspace/django-rest-api/core/tests/test_types.py", line 177, in test_non_https_url
parse_obj_as(HttpsUrl, url)
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py", line 37, in parse_obj_as
model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py", line 30, in _get_parsing_type
return create_model(type_name, __root__=(type_, ...))
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/main.py", line 1319, in create_model
return meta(__model_name, resolved_bases, namespace, __pydantic_reset_parent_namespace__=False, **kwds)
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py", line 96, in __new__
namespace, config_wrapper.ignored_types, class_vars, base_field_names
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py", line 279, in inspect_namespace
raise TypeError("To define root models, use `pydantic.RootModel` rather than a field called '__root__'")
TypeError: To define root models, use `pydantic.RootModel` rather than a field called '__root__'
```
On inspecting the source code, I noticed that `parse_obj_as` calls `_get_parsing_type`, which in turn calls `pydantic.main.create_model` instead of `pydantic.v1.main.create_model`.
The issue is resolved by updating the import statement in `pydantic.v1.tools._get_parsing_type` (line 24) from `from pydantic.main import create_model` to `from pydantic.v1.main import create_model`.
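A minimal reproduction of the failure path described above, as a hedged sketch (any target type triggers it, since `parse_obj_as` always builds an internal `__root__` model; the exact error text depends on the installed build):

```python
from pydantic.v1 import parse_obj_as

# On affected builds this raises:
#   TypeError: To define root models, use `pydantic.RootModel` rather than
#   a field called '__root__'
# because the V1 shim reaches V2's create_model. On fixed builds it prints 42.
print(parse_obj_as(int, '42'))
```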
### Example Code
_No response_
### Python, Pydantic & OS Version
```Text
python -c "import pydantic.version; print(pydantic.version.version_info())"
pydantic version: 2.0
pydantic-core version: 2.0.1 release build profile
install path: /Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic
python version: 3.7.12 (default, Nov 22 2022, 14:45:00) [Clang 13.1.6 (clang-1316.0.21.2.5)]
platform: Darwin-22.2.0-x86_64-i386-64bit
optional deps. installed: ['email-validator', 'typing-extensions']
```
Selected Assignee: @lig
| [
{
"content": "import json\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union\n\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .types import StrBytes\nfrom .typing import display_as_type\n\n__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')\n\nNameFactory = Union[str, Callable[[Type[Any]], str]]\n\nif TYPE_CHECKING:\n from .typing import DictStrAny\n\n\ndef _generate_parsing_type_name(type_: Any) -> str:\n return f'ParsingModel[{display_as_type(type_)}]'\n\n\n@lru_cache(maxsize=2048)\ndef _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:\n from pydantic.main import create_model\n\n if type_name is None:\n type_name = _generate_parsing_type_name\n if not isinstance(type_name, str):\n type_name = type_name(type_)\n return create_model(type_name, __root__=(type_, ...))\n\n\nT = TypeVar('T')\n\n\ndef parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:\n model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]\n return model_type(__root__=obj).__root__\n\n\ndef parse_file_as(\n type_: Type[T],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef parse_raw_as(\n type_: Type[T],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':\n \"\"\"Generate a JSON schema (as dict) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)\n\n\ndef schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:\n \"\"\"Generate a JSON schema (as JSON) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)\n",
"path": "pydantic/tools.py"
}
] | [
{
"content": "import json\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union\n\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .types import StrBytes\nfrom .typing import display_as_type\n\n__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')\n\nNameFactory = Union[str, Callable[[Type[Any]], str]]\n\nif TYPE_CHECKING:\n from .typing import DictStrAny\n\n\ndef _generate_parsing_type_name(type_: Any) -> str:\n return f'ParsingModel[{display_as_type(type_)}]'\n\n\n@lru_cache(maxsize=2048)\ndef _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:\n from .main import create_model\n\n if type_name is None:\n type_name = _generate_parsing_type_name\n if not isinstance(type_name, str):\n type_name = type_name(type_)\n return create_model(type_name, __root__=(type_, ...))\n\n\nT = TypeVar('T')\n\n\ndef parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:\n model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]\n return model_type(__root__=obj).__root__\n\n\ndef parse_file_as(\n type_: Type[T],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef parse_raw_as(\n type_: Type[T],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':\n \"\"\"Generate a JSON schema (as dict) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)\n\n\ndef schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:\n \"\"\"Generate a JSON schema (as JSON) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)\n",
"path": "pydantic/tools.py"
}
] | diff --git a/changes/6361-SharathHuddar.md b/changes/6361-SharathHuddar.md
new file mode 100644
index 00000000000..2897d88ff21
--- /dev/null
+++ b/changes/6361-SharathHuddar.md
@@ -0,0 +1 @@
+Importing create_model in tools.py through relative path instead of absolute path - so that it doesn't import V2 code when copied over to V2 branch
diff --git a/pydantic/tools.py b/pydantic/tools.py
index 9cdb4538eb5..45be27704cb 100644
--- a/pydantic/tools.py
+++ b/pydantic/tools.py
@@ -21,7 +21,7 @@ def _generate_parsing_type_name(type_: Any) -> str:
@lru_cache(maxsize=2048)
def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
- from pydantic.main import create_model
+ from .main import create_model
if type_name is None:
type_name = _generate_parsing_type_name
|
getsentry__sentry-python-1554 | Redis integration tests have side effects
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.2
### Steps to Reproduce
While working on https://github.com/getsentry/sentry-python/pull/1543, I noticed the following:
1. Checked out `sentry-sdk` for development.
2. Installed redis:
```
fakeredis==1.9.0
redis==3.5.3
redis-py-cluster==2.1.3
```
3. Run redis integration tests twice, in different order:
```bash
# first rediscluster, then redis
pytest 'tests/integrations/rediscluster/test_rediscluster.py::test_rediscluster_basic[RedisCluster]' tests/integrations/redis/test_redis.py::test_basic
# first redis, then rediscluster
pytest tests/integrations/redis/test_redis.py::test_basic 'tests/integrations/rediscluster/test_rediscluster.py::test_rediscluster_basic[RedisCluster]'
```
### Expected Result
Both test runs pass.
### Actual Result
The second test run
```bash
pytest tests/integrations/redis/test_redis.py::test_basic 'tests/integrations/rediscluster/test_rediscluster.py::test_rediscluster_basic[RedisCluster]'
```
fails with
```pytest
tests/integrations/redis/test_redis.py . [ 50%]
tests/integrations/rediscluster/test_rediscluster.py F [100%]
============================================================================================================================================ FAILURES =============================================================================================================================================
______________________________________________________________________________________________________________________________ test_rediscluster_basic[RedisCluster] ______________________________________________________________________________________________________________________________
tests/integrations/rediscluster/test_rediscluster.py:29: in test_rediscluster_basic
(crumb,) = event["breadcrumbs"]["values"]
E ValueError: not enough values to unpack (expected 1, got 0)
```
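One way to isolate such tests is to reset the SDK's record of already-installed integrations between tests, which is what the fix below does via a conftest fixture. A hedged sketch of that idea (`_installed_integrations` is a sentry-sdk internal and may change):

```python
import pytest
from sentry_sdk.integrations import _installed_integrations  # internal registry

@pytest.fixture
def reset_integrations():
    # Start the test with no integrations recorded as installed, so that
    # setup_once() and its monkeypatching run again for this test.
    _installed_integrations.clear()
```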
| [
{
"content": "from __future__ import absolute_import\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, logger\nfrom sentry_sdk.integrations import Integration, DidNotEnable\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any, Sequence\n\n_SINGLE_KEY_COMMANDS = frozenset(\n [\"decr\", \"decrby\", \"get\", \"incr\", \"incrby\", \"pttl\", \"set\", \"setex\", \"setnx\", \"ttl\"]\n)\n_MULTI_KEY_COMMANDS = frozenset([\"del\", \"touch\", \"unlink\"])\n\n#: Trim argument lists to this many values\n_MAX_NUM_ARGS = 10\n\n\ndef patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):\n # type: (Any, bool, Any) -> None\n old_execute = pipeline_cls.execute\n\n def sentry_patched_execute(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute(self, *args, **kwargs)\n\n with hub.start_span(op=\"redis\", description=\"redis.pipeline.execute\") as span:\n with capture_internal_exceptions():\n span.set_tag(\"redis.is_cluster\", is_cluster)\n transaction = self.transaction if not is_cluster else False\n span.set_tag(\"redis.transaction\", transaction)\n\n commands = []\n for i, arg in enumerate(self.command_stack):\n if i > _MAX_NUM_ARGS:\n break\n command_args = []\n for j, command_arg in enumerate(get_command_args_fn(arg)):\n if j > 0:\n command_arg = repr(command_arg)\n command_args.append(command_arg)\n commands.append(\" \".join(command_args))\n\n span.set_data(\n \"redis.commands\",\n {\"count\": len(self.command_stack), \"first_ten\": commands},\n )\n\n return old_execute(self, *args, **kwargs)\n\n pipeline_cls.execute = sentry_patched_execute\n\n\ndef _get_redis_command_args(command):\n # type: (Any) -> Sequence[Any]\n return command[0]\n\n\ndef _parse_rediscluster_command(command):\n # type: (Any) -> Sequence[Any]\n return command.args\n\n\ndef _patch_rediscluster():\n # type: () -> None\n try:\n import rediscluster # type: ignore\n except ImportError:\n return\n\n patch_redis_client(rediscluster.RedisCluster, is_cluster=True)\n\n # up to v1.3.6, __version__ attribute is a tuple\n # from v2.0.0, __version__ is a string and VERSION a tuple\n version = getattr(rediscluster, \"VERSION\", rediscluster.__version__)\n\n # StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0\n # https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst\n if (0, 2, 0) < version < (2, 0, 0):\n pipeline_cls = rediscluster.StrictClusterPipeline\n patch_redis_client(rediscluster.StrictRedisCluster, is_cluster=True)\n else:\n pipeline_cls = rediscluster.ClusterPipeline\n\n patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)\n\n\nclass RedisIntegration(Integration):\n identifier = \"redis\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n try:\n import redis\n except ImportError:\n raise DidNotEnable(\"Redis client not installed\")\n\n patch_redis_client(redis.StrictRedis, is_cluster=False)\n patch_redis_pipeline(redis.client.Pipeline, False, _get_redis_command_args)\n try:\n strict_pipeline = redis.client.StrictPipeline # type: ignore\n except AttributeError:\n pass\n else:\n patch_redis_pipeline(strict_pipeline, False, _get_redis_command_args)\n\n try:\n import rb.clients # type: ignore\n except ImportError:\n pass\n else:\n patch_redis_client(rb.clients.FanoutClient, is_cluster=False)\n patch_redis_client(rb.clients.MappingClient, is_cluster=False)\n patch_redis_client(rb.clients.RoutingClient, 
is_cluster=False)\n\n try:\n _patch_rediscluster()\n except Exception:\n logger.exception(\"Error occurred while patching `rediscluster` library\")\n\n\ndef patch_redis_client(cls, is_cluster):\n # type: (Any, bool) -> None\n \"\"\"\n This function can be used to instrument custom redis client classes or\n subclasses.\n \"\"\"\n\n old_execute_command = cls.execute_command\n\n def sentry_patched_execute_command(self, name, *args, **kwargs):\n # type: (Any, str, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute_command(self, name, *args, **kwargs)\n\n description = name\n\n with capture_internal_exceptions():\n description_parts = [name]\n for i, arg in enumerate(args):\n if i > _MAX_NUM_ARGS:\n break\n\n description_parts.append(repr(arg))\n\n description = \" \".join(description_parts)\n\n with hub.start_span(op=\"redis\", description=description) as span:\n span.set_tag(\"redis.is_cluster\", is_cluster)\n if name:\n span.set_tag(\"redis.command\", name)\n\n if name and args:\n name_low = name.lower()\n if (name_low in _SINGLE_KEY_COMMANDS) or (\n name_low in _MULTI_KEY_COMMANDS and len(args) == 1\n ):\n span.set_tag(\"redis.key\", args[0])\n\n return old_execute_command(self, name, *args, **kwargs)\n\n cls.execute_command = sentry_patched_execute_command\n",
"path": "sentry_sdk/integrations/redis.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, logger\nfrom sentry_sdk.integrations import Integration, DidNotEnable\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any, Sequence\n\n_SINGLE_KEY_COMMANDS = frozenset(\n [\"decr\", \"decrby\", \"get\", \"incr\", \"incrby\", \"pttl\", \"set\", \"setex\", \"setnx\", \"ttl\"]\n)\n_MULTI_KEY_COMMANDS = frozenset([\"del\", \"touch\", \"unlink\"])\n\n#: Trim argument lists to this many values\n_MAX_NUM_ARGS = 10\n\n\ndef patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):\n # type: (Any, bool, Any) -> None\n old_execute = pipeline_cls.execute\n\n def sentry_patched_execute(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute(self, *args, **kwargs)\n\n with hub.start_span(op=\"redis\", description=\"redis.pipeline.execute\") as span:\n with capture_internal_exceptions():\n span.set_tag(\"redis.is_cluster\", is_cluster)\n transaction = self.transaction if not is_cluster else False\n span.set_tag(\"redis.transaction\", transaction)\n\n commands = []\n for i, arg in enumerate(self.command_stack):\n if i > _MAX_NUM_ARGS:\n break\n command_args = []\n for j, command_arg in enumerate(get_command_args_fn(arg)):\n if j > 0:\n command_arg = repr(command_arg)\n command_args.append(command_arg)\n commands.append(\" \".join(command_args))\n\n span.set_data(\n \"redis.commands\",\n {\"count\": len(self.command_stack), \"first_ten\": commands},\n )\n\n return old_execute(self, *args, **kwargs)\n\n pipeline_cls.execute = sentry_patched_execute\n\n\ndef _get_redis_command_args(command):\n # type: (Any) -> Sequence[Any]\n return command[0]\n\n\ndef _parse_rediscluster_command(command):\n # type: (Any) -> Sequence[Any]\n return command.args\n\n\ndef _patch_rediscluster():\n # type: () -> None\n try:\n import rediscluster # type: ignore\n except ImportError:\n return\n\n patch_redis_client(rediscluster.RedisCluster, is_cluster=True)\n\n # up to v1.3.6, __version__ attribute is a tuple\n # from v2.0.0, __version__ is a string and VERSION a tuple\n version = getattr(rediscluster, \"VERSION\", rediscluster.__version__)\n\n # StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0\n # https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst\n if (0, 2, 0) < version < (2, 0, 0):\n pipeline_cls = rediscluster.StrictClusterPipeline\n patch_redis_client(rediscluster.StrictRedisCluster, is_cluster=True)\n else:\n pipeline_cls = rediscluster.ClusterPipeline\n\n patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)\n\n\nclass RedisIntegration(Integration):\n identifier = \"redis\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n try:\n import redis\n except ImportError:\n raise DidNotEnable(\"Redis client not installed\")\n\n patch_redis_client(redis.StrictRedis, is_cluster=False)\n patch_redis_pipeline(redis.client.Pipeline, False, _get_redis_command_args)\n try:\n strict_pipeline = redis.client.StrictPipeline # type: ignore\n except AttributeError:\n pass\n else:\n patch_redis_pipeline(strict_pipeline, False, _get_redis_command_args)\n\n try:\n import rb.clients # type: ignore\n except ImportError:\n pass\n else:\n patch_redis_client(rb.clients.FanoutClient, is_cluster=False)\n patch_redis_client(rb.clients.MappingClient, is_cluster=False)\n patch_redis_client(rb.clients.RoutingClient, 
is_cluster=False)\n\n try:\n _patch_rediscluster()\n except Exception:\n logger.exception(\"Error occurred while patching `rediscluster` library\")\n\n\ndef patch_redis_client(cls, is_cluster):\n # type: (Any, bool) -> None\n \"\"\"\n This function can be used to instrument custom redis client classes or\n subclasses.\n \"\"\"\n old_execute_command = cls.execute_command\n\n def sentry_patched_execute_command(self, name, *args, **kwargs):\n # type: (Any, str, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute_command(self, name, *args, **kwargs)\n\n description = name\n\n with capture_internal_exceptions():\n description_parts = [name]\n for i, arg in enumerate(args):\n if i > _MAX_NUM_ARGS:\n break\n\n description_parts.append(repr(arg))\n\n description = \" \".join(description_parts)\n\n with hub.start_span(op=\"redis\", description=description) as span:\n span.set_tag(\"redis.is_cluster\", is_cluster)\n if name:\n span.set_tag(\"redis.command\", name)\n\n if name and args:\n name_low = name.lower()\n if (name_low in _SINGLE_KEY_COMMANDS) or (\n name_low in _MULTI_KEY_COMMANDS and len(args) == 1\n ):\n span.set_tag(\"redis.key\", args[0])\n\n return old_execute_command(self, name, *args, **kwargs)\n\n cls.execute_command = sentry_patched_execute_command\n",
"path": "sentry_sdk/integrations/redis.py"
}
] | diff --git a/sentry_sdk/integrations/redis.py b/sentry_sdk/integrations/redis.py
index a4434a3f01..fc4e9cc7c2 100644
--- a/sentry_sdk/integrations/redis.py
+++ b/sentry_sdk/integrations/redis.py
@@ -131,7 +131,6 @@ def patch_redis_client(cls, is_cluster):
This function can be used to instrument custom redis client classes or
subclasses.
"""
-
old_execute_command = cls.execute_command
def sentry_patched_execute_command(self, name, *args, **kwargs):
diff --git a/tests/conftest.py b/tests/conftest.py
index 61f25d98ee..7479a3e213 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -19,6 +19,7 @@
from sentry_sdk.transport import Transport
from sentry_sdk.envelope import Envelope
from sentry_sdk.utils import capture_internal_exceptions
+from sentry_sdk.integrations import _installed_integrations # noqa: F401
from tests import _warning_recorder, _warning_recorder_mgr
@@ -165,6 +166,17 @@ def inner(event):
return inner
+@pytest.fixture
+def reset_integrations():
+ """
+ Use with caution, sometimes we really need to start
+ with a clean slate to ensure monkeypatching works well,
+ but this also means some other stuff will be monkeypatched twice.
+ """
+ global _installed_integrations
+ _installed_integrations.clear()
+
+
@pytest.fixture
def sentry_init(monkeypatch_test_transport, request):
def inner(*a, **kw):
diff --git a/tests/integrations/rediscluster/test_rediscluster.py b/tests/integrations/rediscluster/test_rediscluster.py
index 7442490b2e..9be21a2953 100644
--- a/tests/integrations/rediscluster/test_rediscluster.py
+++ b/tests/integrations/rediscluster/test_rediscluster.py
@@ -11,8 +11,8 @@
rediscluster_classes.append(rediscluster.StrictRedisCluster)
-@pytest.fixture(scope="module", autouse=True)
-def monkeypatch_rediscluster_classes():
+@pytest.fixture(autouse=True)
+def monkeypatch_rediscluster_classes(reset_integrations):
try:
pipeline_cls = rediscluster.ClusterPipeline
diff --git a/tests/integrations/sanic/test_sanic.py b/tests/integrations/sanic/test_sanic.py
index f8fdd696bc..808c6f14c3 100644
--- a/tests/integrations/sanic/test_sanic.py
+++ b/tests/integrations/sanic/test_sanic.py
@@ -1,5 +1,5 @@
+import os
import sys
-
import random
import asyncio
from unittest.mock import Mock
@@ -18,6 +18,20 @@
@pytest.fixture
def app():
+ if SANIC_VERSION < (19,):
+ """
+ Older Sanic versions 0.8 and 18 bind to the same fixed port which
+ creates problems when we run tests concurrently.
+ """
+ old_test_client = Sanic.test_client.__get__
+
+ def new_test_client(self):
+ client = old_test_client(self, Sanic)
+ client.port += os.getpid() % 100
+ return client
+
+ Sanic.test_client = property(new_test_client)
+
if SANIC_VERSION >= (20, 12):
# Build (20.12.0) adds a feature where the instance is stored in an internal class
# registry for later retrieval, and so add register=False to disable that
|
spyder-ide__spyder-20541 | Error when trying to add directories in PythonPath Manager
## Description
### What steps will reproduce the problem?
Installed selenium with pip. Tried adding the /sitepackages directory via the PYTHONPATH manager under Tools in Spyder, but it doesn't work.
### Traceback
```python-traceback
Traceback (most recent call last):
File "/Applications/Spyder.app/Contents/Resources/lib/python3.9/spyder/plugins/pythonpath/widgets/pathmanager.py", line 169, in <lambda>
triggered=lambda x: self.add_path())
File "/Applications/Spyder.app/Contents/Resources/lib/python3.9/spyder/plugins/pythonpath/widgets/pathmanager.py", line 456, in add_path
if self.listwidget.row(self.user_header) < 0:
RuntimeError: wrapped C/C++ object of type QListWidgetItem has been deleted
```
## Versions
* Spyder version: 5.4.2 93124668b (standalone)
* Python version: 3.9.14 64-bit
* Qt version: 5.15.2
* PyQt5 version: 5.15.7
* Operating System: Darwin 22.2.0
### Dependencies
```
# Mandatory:
atomicwrites >=1.2.0 : 1.4.1 (OK)
chardet >=2.0.0 : 5.1.0 (OK)
cloudpickle >=0.5.0 : 2.2.0 (OK)
cookiecutter >=1.6.0 : 2.1.1 (OK)
diff_match_patch >=20181111 : 20200713 (OK)
intervaltree >=3.0.2 : 3.1.0 (OK)
IPython >=7.31.1;<9.0.0 : 8.8.0 (OK)
jedi >=0.17.2;<0.19.0 : 0.18.2 (OK)
jellyfish >=0.7 : 0.9.0 (OK)
jsonschema >=3.2.0 : 4.17.3 (OK)
keyring >=17.0.0 : 23.13.1 (OK)
nbconvert >=4.0 : 7.2.8 (OK)
numpydoc >=0.6.0 : 1.5.0 (OK)
parso >=0.7.0;<0.9.0 : 0.8.3 (OK)
pexpect >=4.4.0 : 4.8.0 (OK)
pickleshare >=0.4 : 0.7.5 (OK)
psutil >=5.3 : 5.9.4 (OK)
pygments >=2.0 : 2.14.0 (OK)
pylint >=2.5.0;<3.0 : 2.15.10 (OK)
pylint_venv >=2.1.1 : None (OK)
pyls_spyder >=0.4.0 : 0.4.0 (OK)
pylsp >=1.7.1;<1.8.0 : 1.7.1 (OK)
pylsp_black >=1.2.0 : 1.2.1 (OK)
qdarkstyle >=3.0.2;<3.1.0 : 3.0.3 (OK)
qstylizer >=0.2.2 : 0.2.2 (OK)
qtawesome >=1.2.1 : 1.2.2 (OK)
qtconsole >=5.4.0;<5.5.0 : 5.4.0 (OK)
qtpy >=2.1.0 : 2.3.0 (OK)
rtree >=0.9.7 : 1.0.1 (OK)
setuptools >=49.6.0 : 66.0.0 (OK)
sphinx >=0.6.6 : 5.1.1 (OK)
spyder_kernels >=2.4.2;<2.5.0 : 2.4.2 (OK)
textdistance >=4.2.0 : 4.5.0 (OK)
three_merge >=0.1.1 : 0.1.1 (OK)
watchdog >=0.10.3 : 2.2.1 (OK)
zmq >=22.1.0 : 24.0.1 (OK)
# Optional:
cython >=0.21 : 0.29.33 (OK)
matplotlib >=3.0.0 : 3.6.3 (OK)
numpy >=1.7 : 1.24.1 (OK)
pandas >=1.1.1 : 1.5.2 (OK)
scipy >=0.17.0 : 1.10.0 (OK)
sympy >=0.7.3 : 1.11.1 (OK)
# Spyder plugins:
spyder_terminal.terminalplugin 1.2.2 : 1.2.2 (OK)
```
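The traceback above dereferences `self.user_header`, a cached `QListWidgetItem` whose underlying C++ object Qt has already deleted (for example after the list widget was cleared and rebuilt). As an illustrative sketch only, not Spyder's actual patch, one defensive pattern is to look the header row up by text each time instead of holding an item reference across rebuilds:

```python
def find_header_row(listwidget, header_text):
    # Search the QListWidget on demand rather than caching QListWidgetItem
    # references that Qt may have deleted behind Python's back.
    for row in range(listwidget.count()):
        if listwidget.item(row).text() == header_text:
            return row
    return -1
```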
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Spyder path manager.\"\"\"\n\n# Standard library imports\nfrom collections import OrderedDict\nimport os\nimport os.path as osp\nimport sys\n\n# Third party imports\nfrom qtpy import PYQT5\nfrom qtpy.compat import getexistingdirectory\nfrom qtpy.QtCore import Qt, Signal, Slot\nfrom qtpy.QtWidgets import (QDialog, QDialogButtonBox, QHBoxLayout,\n QListWidget, QListWidgetItem, QMessageBox,\n QVBoxLayout, QLabel)\n\n# Local imports\nfrom spyder.api.widgets.mixins import SpyderWidgetMixin\nfrom spyder.config.base import _\nfrom spyder.plugins.pythonpath.utils import check_path, get_system_pythonpath\nfrom spyder.utils.environ import get_user_env, set_user_env\nfrom spyder.utils.icon_manager import ima\nfrom spyder.utils.misc import getcwd_or_home\nfrom spyder.utils.stylesheet import PANES_TOOLBAR_STYLESHEET\n\n\nclass PathManagerToolbuttons:\n MoveTop = 'move_top'\n MoveUp = 'move_up'\n MoveDown = 'move_down'\n MoveToBottom = 'move_to_bottom'\n AddPath = 'add_path'\n RemovePath = 'remove_path'\n ExportPaths = 'export_paths'\n\n\nclass PathManager(QDialog, SpyderWidgetMixin):\n \"\"\"Path manager dialog.\"\"\"\n\n redirect_stdio = Signal(bool)\n sig_path_changed = Signal(object)\n\n # This is required for our tests\n CONF_SECTION = 'pythonpath_manager'\n\n def __init__(self, parent, path=None, project_path=None,\n not_active_path=None, sync=True):\n \"\"\"Path manager dialog.\"\"\"\n if PYQT5:\n super().__init__(parent, class_parent=parent)\n else:\n QDialog.__init__(self, parent)\n SpyderWidgetMixin.__init__(self, class_parent=parent)\n\n assert isinstance(path, (tuple, type(None)))\n\n # Match buttons style with the rest of Spyder\n self.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))\n\n self.path = path or ()\n self.project_path = project_path or ()\n self.not_active_path = not_active_path or ()\n self.last_path = getcwd_or_home()\n self.original_path_dict = None\n self.system_path = ()\n self.user_path = []\n\n # This is necessary to run our tests\n if self.path:\n self.update_paths(system_path=get_system_pythonpath())\n\n # Widgets\n self.add_button = None\n self.remove_button = None\n self.movetop_button = None\n self.moveup_button = None\n self.movedown_button = None\n self.movebottom_button = None\n self.export_button = None\n self.user_header = None\n self.project_header = None\n self.system_header = None\n self.headers = []\n self.selection_widgets = []\n self.right_buttons = self._setup_right_toolbar()\n self.listwidget = QListWidget(self)\n self.bbox = QDialogButtonBox(QDialogButtonBox.Ok\n | QDialogButtonBox.Cancel)\n self.button_ok = self.bbox.button(QDialogButtonBox.Ok)\n\n # Widget setup\n self.setWindowTitle(_(\"PYTHONPATH manager\"))\n self.setWindowIcon(ima.icon('pythonpath'))\n self.resize(500, 400)\n self.export_button.setVisible(os.name == 'nt' and sync)\n\n # Description\n description = QLabel(\n _(\"The paths listed below will be passed to IPython consoles and \"\n \"the Python language server as additional locations to search \"\n \"for Python modules.\")\n )\n description.setWordWrap(True)\n\n # Buttons layout\n buttons_layout = QVBoxLayout()\n self._add_buttons_to_layout(self.right_buttons, buttons_layout)\n buttons_layout.addStretch(1)\n\n # Middle layout\n middle_layout = QHBoxLayout()\n middle_layout.addWidget(self.listwidget)\n middle_layout.addLayout(buttons_layout)\n\n # Widget layout\n 
layout = QVBoxLayout()\n layout.addWidget(description)\n layout.addSpacing(12)\n layout.addLayout(middle_layout)\n layout.addWidget(self.bbox)\n self.setLayout(layout)\n\n # Signals\n self.listwidget.currentRowChanged.connect(lambda x: self.refresh())\n self.listwidget.itemChanged.connect(lambda x: self.refresh())\n self.bbox.accepted.connect(self.accept)\n self.bbox.rejected.connect(self.reject)\n\n # Setup\n self.setup()\n\n def _add_buttons_to_layout(self, widgets, layout):\n \"\"\"Helper to add buttons to its layout.\"\"\"\n for widget in widgets:\n layout.addWidget(widget)\n\n def _setup_right_toolbar(self):\n \"\"\"Create top toolbar and actions.\"\"\"\n self.movetop_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveTop,\n text=_(\"Move path to the top\"),\n icon=self.create_icon('2uparrow'),\n triggered=lambda: self.move_to(absolute=0))\n self.moveup_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveUp,\n tip=_(\"Move path up\"),\n icon=self.create_icon('1uparrow'),\n triggered=lambda: self.move_to(relative=-1))\n self.movedown_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveDown,\n tip=_(\"Move path down\"),\n icon=self.create_icon('1downarrow'),\n triggered=lambda: self.move_to(relative=1))\n self.movebottom_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveToBottom,\n text=_(\"Move path to the bottom\"),\n icon=self.create_icon('2downarrow'),\n triggered=lambda: self.move_to(absolute=1))\n self.add_button = self.create_toolbutton(\n PathManagerToolbuttons.AddPath,\n tip=_('Add path'),\n icon=self.create_icon('edit_add'),\n triggered=lambda x: self.add_path())\n self.remove_button = self.create_toolbutton(\n PathManagerToolbuttons.RemovePath,\n tip=_('Remove path'),\n icon=self.create_icon('editclear'),\n triggered=lambda x: self.remove_path())\n self.export_button = self.create_toolbutton(\n PathManagerToolbuttons.ExportPaths,\n icon=self.create_icon('fileexport'),\n triggered=self.export_pythonpath,\n tip=_(\"Export to PYTHONPATH environment variable\"))\n\n self.selection_widgets = [self.movetop_button, self.moveup_button,\n self.movedown_button, self.movebottom_button]\n return (\n [self.add_button, self.remove_button] +\n self.selection_widgets + [self.export_button]\n )\n\n def _create_item(self, path):\n \"\"\"Helper to create a new list item.\"\"\"\n item = QListWidgetItem(path)\n item.setIcon(ima.icon('DirClosedIcon'))\n\n if path in self.project_path:\n item.setFlags(Qt.NoItemFlags | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Checked)\n elif path in self.not_active_path:\n item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Unchecked)\n else:\n item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Checked)\n\n return item\n\n def _create_header(self, text):\n \"\"\"Create a header for a given path section.\"\"\"\n header = QListWidgetItem(text)\n\n # Header is centered and it can't be selected\n header.setTextAlignment(Qt.AlignHCenter)\n header.setFlags(Qt.ItemIsEnabled)\n\n # Make header appear in bold\n font = header.font()\n font.setBold(True)\n header.setFont(font)\n\n return header\n\n @property\n def editable_bottom_row(self):\n \"\"\"Maximum bottom row count that is editable.\"\"\"\n bottom_row = 0\n\n if self.project_header:\n bottom_row += len(self.project_path) + 1\n if self.user_header:\n bottom_row += len(self.user_path)\n\n return bottom_row\n\n @property\n def editable_top_row(self):\n \"\"\"Maximum top row count that is editable.\"\"\"\n top_row = 0\n\n 
if self.project_header:\n top_row += len(self.project_path) + 1\n if self.user_header:\n top_row += 1\n\n return top_row\n\n def setup(self):\n \"\"\"Populate list widget.\"\"\"\n self.listwidget.clear()\n\n # Project path\n if self.project_path:\n self.project_header = self._create_header(_(\"Project path\"))\n self.headers.append(self.project_header)\n self.listwidget.addItem(self.project_header)\n\n for path in self.project_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n # Paths added by the user\n if self.user_path:\n self.user_header = self._create_header(_(\"User paths\"))\n self.headers.append(self.user_header)\n self.listwidget.addItem(self.user_header)\n\n for path in self.user_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n # System path\n if self.system_path:\n self.system_header = self._create_header(_(\"System PYTHONPATH\"))\n self.headers.append(self.system_header)\n self.listwidget.addItem(self.system_header)\n\n for path in self.system_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n self.listwidget.setCurrentRow(0)\n self.original_path_dict = self.get_path_dict()\n self.refresh()\n\n @Slot()\n def export_pythonpath(self):\n \"\"\"\n Export to PYTHONPATH environment variable\n Only apply to: current user.\n \"\"\"\n answer = QMessageBox.question(\n self,\n _(\"Export\"),\n _(\"This will export Spyder's path list to the \"\n \"<b>PYTHONPATH</b> environment variable for the current user, \"\n \"allowing you to run your Python modules outside Spyder \"\n \"without having to configure sys.path. \"\n \"<br><br>\"\n \"Do you want to clear the contents of PYTHONPATH before \"\n \"adding Spyder's path list?\"),\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel\n )\n\n if answer == QMessageBox.Cancel:\n return\n\n env = get_user_env()\n\n # This doesn't include the project path because it's a transient\n # directory, i.e. 
only used in Spyder and during specific\n # circumstances.\n active_path = [k for k, v in self.get_path_dict().items() if v]\n\n if answer == QMessageBox.Yes:\n ppath = active_path\n else:\n ppath = env.get('PYTHONPATH', [])\n if not isinstance(ppath, list):\n ppath = [ppath]\n\n ppath = [p for p in ppath if p not in active_path]\n ppath = ppath + active_path\n\n os.environ['PYTHONPATH'] = os.pathsep.join(ppath)\n\n # Update widget so changes are reflected on it immediately\n self.update_paths(system_path=tuple(ppath))\n self.set_conf('system_path', tuple(ppath))\n self.setup()\n\n env['PYTHONPATH'] = list(ppath)\n set_user_env(env, parent=self)\n\n def get_path_dict(self, project_path=False):\n \"\"\"\n Return an ordered dict with the path entries as keys and the active\n state as the value.\n\n If `project_path` is True, its entries are also included.\n \"\"\"\n odict = OrderedDict()\n for row in range(self.listwidget.count()):\n item = self.listwidget.item(row)\n path = item.text()\n if item not in self.headers:\n if path in self.project_path and not project_path:\n continue\n odict[path] = item.checkState() == Qt.Checked\n return odict\n\n def get_user_path(self):\n \"\"\"Get current user path as displayed on listwidget.\"\"\"\n user_path = []\n for row in range(self.listwidget.count()):\n item = self.listwidget.item(row)\n path = item.text()\n if item not in self.headers:\n if path not in (self.project_path + self.system_path):\n user_path.append(path)\n return user_path\n\n def update_paths(self, path=None, not_active_path=None, system_path=None):\n \"\"\"Update path attributes.\"\"\"\n if path is not None:\n self.path = path\n if not_active_path is not None:\n self.not_active_path = not_active_path\n if system_path is not None:\n self.system_path = system_path\n\n previous_system_path = self.get_conf('system_path', ())\n self.user_path = [\n path for path in self.path\n if path not in (self.system_path + previous_system_path)\n ]\n\n def refresh(self):\n \"\"\"Refresh toolbar widgets.\"\"\"\n current_item = self.listwidget.currentItem()\n enabled = current_item is not None\n for widget in self.selection_widgets:\n widget.setEnabled(enabled)\n\n # Main variables\n row = self.listwidget.currentRow()\n disable_widgets = []\n\n # Move up/top disabled for less than top editable item.\n if row <= self.editable_top_row:\n disable_widgets.extend([self.movetop_button, self.moveup_button])\n\n # Move down/bottom disabled for bottom item\n if row == self.editable_bottom_row:\n disable_widgets.extend([self.movebottom_button,\n self.movedown_button])\n\n # Disable almost all buttons on headers or system PYTHONPATH\n if current_item in self.headers or row > self.editable_bottom_row:\n disable_widgets.extend(\n [self.movetop_button, self.moveup_button,\n self.movebottom_button, self.movedown_button]\n )\n\n for widget in disable_widgets:\n widget.setEnabled(False)\n\n # Enable remove button only for user paths\n self.remove_button.setEnabled(\n not current_item in self.headers\n and (self.editable_top_row <= row <= self.editable_bottom_row)\n )\n\n self.export_button.setEnabled(self.listwidget.count() > 0)\n\n # Ok button only enabled if actual changes occur\n self.button_ok.setEnabled(\n self.original_path_dict != self.get_path_dict())\n\n @Slot()\n def add_path(self, directory=None):\n \"\"\"\n Add path to list widget.\n\n If `directory` is provided, the folder dialog is overridden.\n \"\"\"\n if directory is None:\n self.redirect_stdio.emit(False)\n directory = getexistingdirectory(self, 
_(\"Select directory\"),\n self.last_path)\n self.redirect_stdio.emit(True)\n if not directory:\n return\n\n directory = osp.abspath(directory)\n self.last_path = directory\n\n if directory in self.get_path_dict():\n item = self.listwidget.findItems(directory, Qt.MatchExactly)[0]\n item.setCheckState(Qt.Checked)\n answer = QMessageBox.question(\n self,\n _(\"Add path\"),\n _(\"This directory is already included in the list.\"\n \"<br> \"\n \"Do you want to move it to the top of it?\"),\n QMessageBox.Yes | QMessageBox.No)\n\n if answer == QMessageBox.Yes:\n item = self.listwidget.takeItem(self.listwidget.row(item))\n self.listwidget.insertItem(1, item)\n self.listwidget.setCurrentRow(1)\n else:\n if check_path(directory):\n if not self.user_header:\n self.user_header = self._create_header(_(\"User paths\"))\n self.headers.append(self.user_header)\n\n # Add header if not visible\n if self.listwidget.row(self.user_header) < 0:\n if self.editable_top_row > 0:\n header_row = self.editable_top_row - 1\n else:\n header_row = 0\n self.listwidget.insertItem(header_row,\n self.user_header)\n\n # Add new path\n item = self._create_item(directory)\n self.listwidget.insertItem(self.editable_top_row, item)\n self.listwidget.setCurrentRow(self.editable_top_row)\n\n self.user_path.insert(0, directory)\n else:\n answer = QMessageBox.warning(\n self,\n _(\"Add path\"),\n _(\"This directory cannot be added to the path!\"\n \"<br><br>\"\n \"If you want to set a different Python interpreter, \"\n \"please go to <tt>Preferences > Main interpreter</tt>\"\n \".\"),\n QMessageBox.Ok)\n\n self.refresh()\n\n @Slot()\n def remove_path(self, force=False):\n \"\"\"\n Remove path from list widget.\n\n If `force` is True, the message box is overridden.\n \"\"\"\n if self.listwidget.currentItem():\n if not force:\n answer = QMessageBox.warning(\n self,\n _(\"Remove path\"),\n _(\"Do you really want to remove the selected path?\"),\n QMessageBox.Yes | QMessageBox.No)\n\n if force or answer == QMessageBox.Yes:\n # Remove current item from user_path\n item = self.listwidget.currentItem()\n self.user_path.remove(item.text())\n\n # Remove selected item from view\n self.listwidget.takeItem(self.listwidget.currentRow())\n\n # Remove user header if there are no more user paths\n if len(self.user_path) == 0:\n self.listwidget.takeItem(\n self.listwidget.row(self.user_header))\n\n # Refresh widget\n self.refresh()\n\n def move_to(self, absolute=None, relative=None):\n \"\"\"Move items of list widget.\"\"\"\n index = self.listwidget.currentRow()\n if absolute is not None:\n if absolute:\n new_index = self.editable_bottom_row\n else:\n new_index = self.editable_top_row\n else:\n new_index = index + relative\n\n new_index = max(1, min(self.editable_bottom_row, new_index))\n item = self.listwidget.takeItem(index)\n self.listwidget.insertItem(new_index, item)\n self.listwidget.setCurrentRow(new_index)\n\n self.user_path = self.get_user_path()\n self.refresh()\n\n def current_row(self):\n \"\"\"Returns the current row of the list.\"\"\"\n return self.listwidget.currentRow()\n\n def set_current_row(self, row):\n \"\"\"Set the current row of the list.\"\"\"\n self.listwidget.setCurrentRow(row)\n\n def row_check_state(self, row):\n \"\"\"Return the checked state for item in row.\"\"\"\n item = self.listwidget.item(row)\n return item.checkState()\n\n def set_row_check_state(self, row, value):\n \"\"\"Set the current checked state for item in row.\"\"\"\n item = self.listwidget.item(row)\n item.setCheckState(value)\n\n def count(self):\n 
\"\"\"Return the number of items.\"\"\"\n return self.listwidget.count()\n\n # ---- Qt methods\n # -------------------------------------------------------------------------\n def _update_system_path(self):\n \"\"\"\n Request to update path values on main window if current and previous\n system paths are different.\n \"\"\"\n if self.system_path != self.get_conf('system_path', default=()):\n self.sig_path_changed.emit(self.get_path_dict())\n self.set_conf('system_path', self.system_path)\n\n def accept(self):\n \"\"\"Override Qt method.\"\"\"\n path_dict = self.get_path_dict()\n if self.original_path_dict != path_dict:\n self.sig_path_changed.emit(path_dict)\n super().accept()\n\n def reject(self):\n self._update_system_path()\n super().reject()\n\n def closeEvent(self, event):\n self._update_system_path()\n super().closeEvent(event)\n\n\ndef test():\n \"\"\"Run path manager test.\"\"\"\n from spyder.utils.qthelpers import qapplication\n\n _ = qapplication()\n dlg = PathManager(\n None,\n path=tuple(sys.path[:1]),\n project_path=tuple(sys.path[-2:]),\n )\n\n def callback(path_dict):\n sys.stdout.write(str(path_dict))\n\n dlg.sig_path_changed.connect(callback)\n sys.exit(dlg.exec_())\n\n\nif __name__ == \"__main__\":\n test()\n",
"path": "spyder/plugins/pythonpath/widgets/pathmanager.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Spyder path manager.\"\"\"\n\n# Standard library imports\nfrom collections import OrderedDict\nimport os\nimport os.path as osp\nimport sys\n\n# Third party imports\nfrom qtpy import PYQT5\nfrom qtpy.compat import getexistingdirectory\nfrom qtpy.QtCore import Qt, Signal, Slot\nfrom qtpy.QtWidgets import (QDialog, QDialogButtonBox, QHBoxLayout,\n QListWidget, QListWidgetItem, QMessageBox,\n QVBoxLayout, QLabel)\n\n# Local imports\nfrom spyder.api.widgets.mixins import SpyderWidgetMixin\nfrom spyder.config.base import _\nfrom spyder.plugins.pythonpath.utils import check_path, get_system_pythonpath\nfrom spyder.utils.environ import get_user_env, set_user_env\nfrom spyder.utils.icon_manager import ima\nfrom spyder.utils.misc import getcwd_or_home\nfrom spyder.utils.stylesheet import PANES_TOOLBAR_STYLESHEET\n\n\nclass PathManagerToolbuttons:\n MoveTop = 'move_top'\n MoveUp = 'move_up'\n MoveDown = 'move_down'\n MoveToBottom = 'move_to_bottom'\n AddPath = 'add_path'\n RemovePath = 'remove_path'\n ExportPaths = 'export_paths'\n\n\nclass PathManager(QDialog, SpyderWidgetMixin):\n \"\"\"Path manager dialog.\"\"\"\n\n redirect_stdio = Signal(bool)\n sig_path_changed = Signal(object)\n\n # This is required for our tests\n CONF_SECTION = 'pythonpath_manager'\n\n def __init__(self, parent, path=None, project_path=None,\n not_active_path=None, sync=True):\n \"\"\"Path manager dialog.\"\"\"\n if PYQT5:\n super().__init__(parent, class_parent=parent)\n else:\n QDialog.__init__(self, parent)\n SpyderWidgetMixin.__init__(self, class_parent=parent)\n\n assert isinstance(path, (tuple, type(None)))\n\n # Match buttons style with the rest of Spyder\n self.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))\n\n self.path = path or ()\n self.project_path = project_path or ()\n self.not_active_path = not_active_path or ()\n self.last_path = getcwd_or_home()\n self.original_path_dict = None\n self.system_path = ()\n self.user_path = []\n\n # This is necessary to run our tests\n if self.path:\n self.update_paths(system_path=get_system_pythonpath())\n\n # Widgets\n self.add_button = None\n self.remove_button = None\n self.movetop_button = None\n self.moveup_button = None\n self.movedown_button = None\n self.movebottom_button = None\n self.export_button = None\n self.user_header = None\n self.project_header = None\n self.system_header = None\n self.headers = []\n self.selection_widgets = []\n self.right_buttons = self._setup_right_toolbar()\n self.listwidget = QListWidget(self)\n self.bbox = QDialogButtonBox(QDialogButtonBox.Ok\n | QDialogButtonBox.Cancel)\n self.button_ok = self.bbox.button(QDialogButtonBox.Ok)\n\n # Widget setup\n self.setWindowTitle(_(\"PYTHONPATH manager\"))\n self.setWindowIcon(ima.icon('pythonpath'))\n self.resize(500, 400)\n self.export_button.setVisible(os.name == 'nt' and sync)\n\n # Description\n description = QLabel(\n _(\"The paths listed below will be passed to IPython consoles and \"\n \"the Python language server as additional locations to search \"\n \"for Python modules.\")\n )\n description.setWordWrap(True)\n\n # Buttons layout\n buttons_layout = QVBoxLayout()\n self._add_buttons_to_layout(self.right_buttons, buttons_layout)\n buttons_layout.addStretch(1)\n\n # Middle layout\n middle_layout = QHBoxLayout()\n middle_layout.addWidget(self.listwidget)\n middle_layout.addLayout(buttons_layout)\n\n # Widget layout\n 
layout = QVBoxLayout()\n layout.addWidget(description)\n layout.addSpacing(12)\n layout.addLayout(middle_layout)\n layout.addWidget(self.bbox)\n self.setLayout(layout)\n\n # Signals\n self.listwidget.currentRowChanged.connect(lambda x: self.refresh())\n self.listwidget.itemChanged.connect(lambda x: self.refresh())\n self.bbox.accepted.connect(self.accept)\n self.bbox.rejected.connect(self.reject)\n\n # Setup\n self.setup()\n\n def _add_buttons_to_layout(self, widgets, layout):\n \"\"\"Helper to add buttons to its layout.\"\"\"\n for widget in widgets:\n layout.addWidget(widget)\n\n def _setup_right_toolbar(self):\n \"\"\"Create top toolbar and actions.\"\"\"\n self.movetop_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveTop,\n text=_(\"Move path to the top\"),\n icon=self.create_icon('2uparrow'),\n triggered=lambda: self.move_to(absolute=0))\n self.moveup_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveUp,\n tip=_(\"Move path up\"),\n icon=self.create_icon('1uparrow'),\n triggered=lambda: self.move_to(relative=-1))\n self.movedown_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveDown,\n tip=_(\"Move path down\"),\n icon=self.create_icon('1downarrow'),\n triggered=lambda: self.move_to(relative=1))\n self.movebottom_button = self.create_toolbutton(\n PathManagerToolbuttons.MoveToBottom,\n text=_(\"Move path to the bottom\"),\n icon=self.create_icon('2downarrow'),\n triggered=lambda: self.move_to(absolute=1))\n self.add_button = self.create_toolbutton(\n PathManagerToolbuttons.AddPath,\n tip=_('Add path'),\n icon=self.create_icon('edit_add'),\n triggered=lambda x: self.add_path())\n self.remove_button = self.create_toolbutton(\n PathManagerToolbuttons.RemovePath,\n tip=_('Remove path'),\n icon=self.create_icon('editclear'),\n triggered=lambda x: self.remove_path())\n self.export_button = self.create_toolbutton(\n PathManagerToolbuttons.ExportPaths,\n icon=self.create_icon('fileexport'),\n triggered=self.export_pythonpath,\n tip=_(\"Export to PYTHONPATH environment variable\"))\n\n self.selection_widgets = [self.movetop_button, self.moveup_button,\n self.movedown_button, self.movebottom_button]\n return (\n [self.add_button, self.remove_button] +\n self.selection_widgets + [self.export_button]\n )\n\n def _create_item(self, path):\n \"\"\"Helper to create a new list item.\"\"\"\n item = QListWidgetItem(path)\n item.setIcon(ima.icon('DirClosedIcon'))\n\n if path in self.project_path:\n item.setFlags(Qt.NoItemFlags | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Checked)\n elif path in self.not_active_path:\n item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Unchecked)\n else:\n item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Checked)\n\n return item\n\n def _create_header(self, text):\n \"\"\"Create a header for a given path section.\"\"\"\n header = QListWidgetItem(text)\n\n # Header is centered and it can't be selected\n header.setTextAlignment(Qt.AlignHCenter)\n header.setFlags(Qt.ItemIsEnabled)\n\n # Make header appear in bold\n font = header.font()\n font.setBold(True)\n header.setFont(font)\n\n return header\n\n @property\n def editable_bottom_row(self):\n \"\"\"Maximum bottom row count that is editable.\"\"\"\n bottom_row = 0\n\n if self.project_header:\n bottom_row += len(self.project_path) + 1\n if self.user_header:\n bottom_row += len(self.user_path)\n\n return bottom_row\n\n @property\n def editable_top_row(self):\n \"\"\"Maximum top row count that is editable.\"\"\"\n top_row = 0\n\n 
if self.project_header:\n top_row += len(self.project_path) + 1\n if self.user_header:\n top_row += 1\n\n return top_row\n\n def setup(self):\n \"\"\"Populate list widget.\"\"\"\n self.listwidget.clear()\n self.headers.clear()\n self.project_header = None\n self.user_header = None\n self.system_header = None\n\n # Project path\n if self.project_path:\n self.project_header = self._create_header(_(\"Project path\"))\n self.headers.append(self.project_header)\n self.listwidget.addItem(self.project_header)\n\n for path in self.project_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n # Paths added by the user\n if self.user_path:\n self.user_header = self._create_header(_(\"User paths\"))\n self.headers.append(self.user_header)\n self.listwidget.addItem(self.user_header)\n\n for path in self.user_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n # System path\n if self.system_path:\n self.system_header = self._create_header(_(\"System PYTHONPATH\"))\n self.headers.append(self.system_header)\n self.listwidget.addItem(self.system_header)\n\n for path in self.system_path:\n item = self._create_item(path)\n self.listwidget.addItem(item)\n\n self.listwidget.setCurrentRow(0)\n self.original_path_dict = self.get_path_dict()\n self.refresh()\n\n @Slot()\n def export_pythonpath(self):\n \"\"\"\n Export to PYTHONPATH environment variable\n Only apply to: current user.\n \"\"\"\n answer = QMessageBox.question(\n self,\n _(\"Export\"),\n _(\"This will export Spyder's path list to the \"\n \"<b>PYTHONPATH</b> environment variable for the current user, \"\n \"allowing you to run your Python modules outside Spyder \"\n \"without having to configure sys.path. \"\n \"<br><br>\"\n \"Do you want to clear the contents of PYTHONPATH before \"\n \"adding Spyder's path list?\"),\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel\n )\n\n if answer == QMessageBox.Cancel:\n return\n\n env = get_user_env()\n\n # This doesn't include the project path because it's a transient\n # directory, i.e. 
only used in Spyder and during specific\n # circumstances.\n active_path = [k for k, v in self.get_path_dict().items() if v]\n\n if answer == QMessageBox.Yes:\n ppath = active_path\n else:\n ppath = env.get('PYTHONPATH', [])\n if not isinstance(ppath, list):\n ppath = [ppath]\n\n ppath = [p for p in ppath if p not in active_path]\n ppath = ppath + active_path\n\n os.environ['PYTHONPATH'] = os.pathsep.join(ppath)\n\n # Update widget so changes are reflected on it immediately\n self.update_paths(system_path=tuple(ppath))\n self.set_conf('system_path', tuple(ppath))\n self.setup()\n\n env['PYTHONPATH'] = list(ppath)\n set_user_env(env, parent=self)\n\n def get_path_dict(self, project_path=False):\n \"\"\"\n Return an ordered dict with the path entries as keys and the active\n state as the value.\n\n If `project_path` is True, its entries are also included.\n \"\"\"\n odict = OrderedDict()\n for row in range(self.listwidget.count()):\n item = self.listwidget.item(row)\n path = item.text()\n if item not in self.headers:\n if path in self.project_path and not project_path:\n continue\n odict[path] = item.checkState() == Qt.Checked\n return odict\n\n def get_user_path(self):\n \"\"\"Get current user path as displayed on listwidget.\"\"\"\n user_path = []\n for row in range(self.listwidget.count()):\n item = self.listwidget.item(row)\n path = item.text()\n if item not in self.headers:\n if path not in (self.project_path + self.system_path):\n user_path.append(path)\n return user_path\n\n def update_paths(self, path=None, not_active_path=None, system_path=None):\n \"\"\"Update path attributes.\"\"\"\n if path is not None:\n self.path = path\n if not_active_path is not None:\n self.not_active_path = not_active_path\n if system_path is not None:\n self.system_path = system_path\n\n previous_system_path = self.get_conf('system_path', ())\n self.user_path = [\n path for path in self.path\n if path not in (self.system_path + previous_system_path)\n ]\n\n def refresh(self):\n \"\"\"Refresh toolbar widgets.\"\"\"\n current_item = self.listwidget.currentItem()\n enabled = current_item is not None\n for widget in self.selection_widgets:\n widget.setEnabled(enabled)\n\n # Main variables\n row = self.listwidget.currentRow()\n disable_widgets = []\n\n # Move up/top disabled for less than top editable item.\n if row <= self.editable_top_row:\n disable_widgets.extend([self.movetop_button, self.moveup_button])\n\n # Move down/bottom disabled for bottom item\n if row == self.editable_bottom_row:\n disable_widgets.extend([self.movebottom_button,\n self.movedown_button])\n\n # Disable almost all buttons on headers or system PYTHONPATH\n if current_item in self.headers or row > self.editable_bottom_row:\n disable_widgets.extend(\n [self.movetop_button, self.moveup_button,\n self.movebottom_button, self.movedown_button]\n )\n\n for widget in disable_widgets:\n widget.setEnabled(False)\n\n # Enable remove button only for user paths\n self.remove_button.setEnabled(\n not current_item in self.headers\n and (self.editable_top_row <= row <= self.editable_bottom_row)\n )\n\n self.export_button.setEnabled(self.listwidget.count() > 0)\n\n # Ok button only enabled if actual changes occur\n self.button_ok.setEnabled(\n self.original_path_dict != self.get_path_dict())\n\n @Slot()\n def add_path(self, directory=None):\n \"\"\"\n Add path to list widget.\n\n If `directory` is provided, the folder dialog is overridden.\n \"\"\"\n if directory is None:\n self.redirect_stdio.emit(False)\n directory = getexistingdirectory(self, 
_(\"Select directory\"),\n self.last_path)\n self.redirect_stdio.emit(True)\n if not directory:\n return\n\n directory = osp.abspath(directory)\n self.last_path = directory\n\n if directory in self.get_path_dict():\n item = self.listwidget.findItems(directory, Qt.MatchExactly)[0]\n item.setCheckState(Qt.Checked)\n answer = QMessageBox.question(\n self,\n _(\"Add path\"),\n _(\"This directory is already included in the list.\"\n \"<br> \"\n \"Do you want to move it to the top of it?\"),\n QMessageBox.Yes | QMessageBox.No)\n\n if answer == QMessageBox.Yes:\n item = self.listwidget.takeItem(self.listwidget.row(item))\n self.listwidget.insertItem(1, item)\n self.listwidget.setCurrentRow(1)\n else:\n if check_path(directory):\n if not self.user_header:\n self.user_header = self._create_header(_(\"User paths\"))\n self.headers.append(self.user_header)\n\n # Add header if not visible\n if self.listwidget.row(self.user_header) < 0:\n if self.editable_top_row > 0:\n header_row = self.editable_top_row - 1\n else:\n header_row = 0\n self.listwidget.insertItem(header_row,\n self.user_header)\n\n # Add new path\n item = self._create_item(directory)\n self.listwidget.insertItem(self.editable_top_row, item)\n self.listwidget.setCurrentRow(self.editable_top_row)\n\n self.user_path.insert(0, directory)\n else:\n answer = QMessageBox.warning(\n self,\n _(\"Add path\"),\n _(\"This directory cannot be added to the path!\"\n \"<br><br>\"\n \"If you want to set a different Python interpreter, \"\n \"please go to <tt>Preferences > Main interpreter</tt>\"\n \".\"),\n QMessageBox.Ok)\n\n self.refresh()\n\n @Slot()\n def remove_path(self, force=False):\n \"\"\"\n Remove path from list widget.\n\n If `force` is True, the message box is overridden.\n \"\"\"\n if self.listwidget.currentItem():\n if not force:\n answer = QMessageBox.warning(\n self,\n _(\"Remove path\"),\n _(\"Do you really want to remove the selected path?\"),\n QMessageBox.Yes | QMessageBox.No)\n\n if force or answer == QMessageBox.Yes:\n # Remove current item from user_path\n item = self.listwidget.currentItem()\n self.user_path.remove(item.text())\n\n # Remove selected item from view\n self.listwidget.takeItem(self.listwidget.currentRow())\n\n # Remove user header if there are no more user paths\n if len(self.user_path) == 0:\n self.listwidget.takeItem(\n self.listwidget.row(self.user_header))\n\n # Refresh widget\n self.refresh()\n\n def move_to(self, absolute=None, relative=None):\n \"\"\"Move items of list widget.\"\"\"\n index = self.listwidget.currentRow()\n if absolute is not None:\n if absolute:\n new_index = self.editable_bottom_row\n else:\n new_index = self.editable_top_row\n else:\n new_index = index + relative\n\n new_index = max(1, min(self.editable_bottom_row, new_index))\n item = self.listwidget.takeItem(index)\n self.listwidget.insertItem(new_index, item)\n self.listwidget.setCurrentRow(new_index)\n\n self.user_path = self.get_user_path()\n self.refresh()\n\n def current_row(self):\n \"\"\"Returns the current row of the list.\"\"\"\n return self.listwidget.currentRow()\n\n def set_current_row(self, row):\n \"\"\"Set the current row of the list.\"\"\"\n self.listwidget.setCurrentRow(row)\n\n def row_check_state(self, row):\n \"\"\"Return the checked state for item in row.\"\"\"\n item = self.listwidget.item(row)\n return item.checkState()\n\n def set_row_check_state(self, row, value):\n \"\"\"Set the current checked state for item in row.\"\"\"\n item = self.listwidget.item(row)\n item.setCheckState(value)\n\n def count(self):\n 
\"\"\"Return the number of items.\"\"\"\n return self.listwidget.count()\n\n # ---- Qt methods\n # -------------------------------------------------------------------------\n def _update_system_path(self):\n \"\"\"\n Request to update path values on main window if current and previous\n system paths are different.\n \"\"\"\n if self.system_path != self.get_conf('system_path', default=()):\n self.sig_path_changed.emit(self.get_path_dict())\n self.set_conf('system_path', self.system_path)\n\n def accept(self):\n \"\"\"Override Qt method.\"\"\"\n path_dict = self.get_path_dict()\n if self.original_path_dict != path_dict:\n self.sig_path_changed.emit(path_dict)\n super().accept()\n\n def reject(self):\n self._update_system_path()\n super().reject()\n\n def closeEvent(self, event):\n self._update_system_path()\n super().closeEvent(event)\n\n\ndef test():\n \"\"\"Run path manager test.\"\"\"\n from spyder.utils.qthelpers import qapplication\n\n _ = qapplication()\n dlg = PathManager(\n None,\n path=tuple(sys.path[:1]),\n project_path=tuple(sys.path[-2:]),\n )\n\n def callback(path_dict):\n sys.stdout.write(str(path_dict))\n\n dlg.sig_path_changed.connect(callback)\n sys.exit(dlg.exec_())\n\n\nif __name__ == \"__main__\":\n test()\n",
"path": "spyder/plugins/pythonpath/widgets/pathmanager.py"
}
] | diff --git a/spyder/plugins/pythonpath/widgets/pathmanager.py b/spyder/plugins/pythonpath/widgets/pathmanager.py
index 3422d2963b4..2a2d7b1e7f8 100644
--- a/spyder/plugins/pythonpath/widgets/pathmanager.py
+++ b/spyder/plugins/pythonpath/widgets/pathmanager.py
@@ -244,6 +244,10 @@ def editable_top_row(self):
def setup(self):
"""Populate list widget."""
self.listwidget.clear()
+ self.headers.clear()
+ self.project_header = None
+ self.user_header = None
+ self.system_header = None
# Project path
if self.project_path:
|
kivy__kivy-5983 | Rotating Scatter does not dispatch on_transform_with_touch
### Versions
* Python 3.6.1 + Kivy 1.10 on Windows
* Python 3.6.4 + recent master on Linux
### Description
1. Rotating a Scatter does not dispatch the `on_transform_with_touch` event
2. Translating a Scatter with `do_translation=False` still dispatches the `on_transform_with_touch` event
### Code and Logs
```python
from kivy.base import runTouchApp
from kivy.lang import Builder
runTouchApp(Builder.load_string('''
Scatter:
size_hint: None, None
pos: 200, 200
do_scale: False
do_translation: False
on_transform_with_touch: print("!!! Transform")
canvas:
Color:
rgba: 1, 0, 0, 1
Rectangle:
pos: 0, 0
size: self.size
'''))
```
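A minimal workaround sketch for point 1, not part of the original report: the subclass name `RotationAwareScatter` is hypothetical. It only covers the rotation case, by comparing `rotation` before and after the stock transform so that `on_touch_move` sees a truthy return value and dispatches `on_transform_with_touch`.
```python
from kivy.uix.scatter import Scatter


class RotationAwareScatter(Scatter):
    """Hypothetical workaround: report rotation-only changes as transforms."""

    def transform_with_touch(self, touch):
        # Remember the rotation before the base class applies the transform.
        rotation_before = self.rotation
        changed = super(RotationAwareScatter, self).transform_with_touch(touch)
        # The stock implementation does not flag rotation-only changes, so
        # treat any rotation delta as a transformation as well; on_touch_move
        # then dispatches on_transform_with_touch as expected.
        return changed or abs(self.rotation - rotation_before) > 1e-9
```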
| [
{
"content": "'''\nScatter\n=======\n\n.. image:: images/scatter.gif\n :align: right\n\n:class:`Scatter` is used to build interactive widgets that can be translated,\nrotated and scaled with two or more fingers on a multitouch system.\n\nScatter has its own matrix transformation: the modelview matrix is changed\nbefore the children are drawn and the previous matrix is restored when the\ndrawing is finished. That makes it possible to perform rotation, scaling and\ntranslation over the entire children tree without changing any widget\nproperties. That specific behavior makes the scatter unique, but there are some\nadvantages / constraints that you should consider:\n\n#. The children are positioned relative to the scatter similarly to a\n :mod:`~kivy.uix.relativelayout.RelativeLayout`. So when dragging the\n scatter, the position of the children don't change, only the position of\n the scatter does.\n#. The scatter size has no impact on the size of it's children.\n#. If you want to resize the scatter, use scale, not size (read #2). Scale\n transforms both the scatter and its children, but does not change size.\n#. The scatter is not a layout. You must manage the size of the children\n yourself.\n\nFor touch events, the scatter converts from the parent matrix to the scatter\nmatrix automatically in on_touch_down/move/up events. If you are doing things\nmanually, you will need to use :meth:`~kivy.uix.widget.Widget.to_parent` and\n:meth:`~kivy.uix.widget.Widget.to_local`.\n\nUsage\n-----\n\nBy default, the Scatter does not have a graphical representation: it is a\ncontainer only. The idea is to combine the Scatter with another widget, for\nexample an :class:`~kivy.uix.image.Image`::\n\n scatter = Scatter()\n image = Image(source='sun.jpg')\n scatter.add_widget(image)\n\nControl Interactions\n--------------------\n\nBy default, all interactions are enabled. You can selectively disable\nthem using the do_rotation, do_translation and do_scale properties.\n\nDisable rotation::\n\n scatter = Scatter(do_rotation=False)\n\nAllow only translation::\n\n scatter = Scatter(do_rotation=False, do_scale=False)\n\nAllow only translation on x axis::\n\n scatter = Scatter(do_rotation=False, do_scale=False,\n do_translation_y=False)\n\n\nAutomatic Bring to Front\n------------------------\n\nIf the :attr:`Scatter.auto_bring_to_front` property is True, the scatter\nwidget will be removed and re-added to the parent when it is touched\n(brought to front, above all other widgets in the parent). This is useful\nwhen you are manipulating several scatter widgets and don't want the active\none to be partially hidden.\n\nScale Limitation\n----------------\n\nWe are using a 32-bit matrix in double representation. That means we have\na limit for scaling. You cannot do infinite scaling down/up with our\nimplementation. Generally, you don't hit the minimum scale (because you don't\nsee it on the screen), but the maximum scale is 9.99506983235e+19 (2^66).\n\nYou can also limit the minimum and maximum scale allowed::\n\n scatter = Scatter(scale_min=.5, scale_max=3.)\n\nBehavior\n--------\n\n.. 
versionchanged:: 1.1.0\n If no control interactions are enabled, then the touch handler will never\n return True.\n\n'''\n\n__all__ = ('Scatter', 'ScatterPlane')\n\nfrom math import radians\nfrom kivy.properties import BooleanProperty, AliasProperty, \\\n NumericProperty, ObjectProperty, BoundedNumericProperty\nfrom kivy.vector import Vector\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics.transformation import Matrix\n\n\nclass Scatter(Widget):\n '''Scatter class. See module documentation for more information.\n\n :Events:\n `on_transform_with_touch`:\n Fired when the scatter has been transformed by user touch\n or multitouch, such as panning or zooming.\n `on_bring_to_front`:\n Fired when the scatter is brought to the front.\n\n .. versionchanged:: 1.9.0\n Event `on_bring_to_front` added.\n\n .. versionchanged:: 1.8.0\n Event `on_transform_with_touch` added.\n '''\n\n __events__ = ('on_transform_with_touch', 'on_bring_to_front')\n\n auto_bring_to_front = BooleanProperty(True)\n '''If True, the widget will be automatically pushed on the top of parent\n widget list for drawing.\n\n :attr:`auto_bring_to_front` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n do_translation_x = BooleanProperty(True)\n '''Allow translation on the X axis.\n\n :attr:`do_translation_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_translation_y = BooleanProperty(True)\n '''Allow translation on Y axis.\n\n :attr:`do_translation_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_translation(self):\n return (self.do_translation_x, self.do_translation_y)\n\n def _set_do_translation(self, value):\n if type(value) in (list, tuple):\n self.do_translation_x, self.do_translation_y = value\n else:\n self.do_translation_x = self.do_translation_y = bool(value)\n do_translation = AliasProperty(\n _get_do_translation, _set_do_translation,\n bind=('do_translation_x', 'do_translation_y'))\n '''Allow translation on the X or Y axis.\n\n :attr:`do_translation` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_translation_x` + :attr:`do_translation_y`)\n '''\n\n translation_touches = BoundedNumericProperty(1, min=1)\n '''Determine whether translation was triggered by a single or multiple\n touches. This only has effect when :attr:`do_translation` = True.\n\n :attr:`translation_touches` is a :class:`~kivy.properties.NumericProperty`\n and defaults to 1.\n\n .. versionadded:: 1.7.0\n '''\n\n do_rotation = BooleanProperty(True)\n '''Allow rotation.\n\n :attr:`do_rotation` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scale = BooleanProperty(True)\n '''Allow scaling.\n\n :attr:`do_scale` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_collide_after_children = BooleanProperty(False)\n '''If True, the collision detection for limiting the touch inside the\n scatter will be done after dispaching the touch to the children.\n You can put children outside the bounding box of the scatter and still be\n able to touch them.\n\n :attr:`do_collide_after_children` is a\n :class:`~kivy.properties.BooleanProperty` and defaults to False.\n\n .. 
versionadded:: 1.3.0\n '''\n\n scale_min = NumericProperty(0.01)\n '''Minimum scaling factor allowed.\n\n :attr:`scale_min` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.01.\n '''\n\n scale_max = NumericProperty(1e20)\n '''Maximum scaling factor allowed.\n\n :attr:`scale_max` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1e20.\n '''\n\n transform = ObjectProperty(Matrix())\n '''Transformation matrix.\n\n :attr:`transform` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to the identity matrix.\n\n .. note::\n\n This matrix reflects the current state of the transformation matrix\n but setting it directly will erase previously applied\n transformations. To apply a transformation considering context,\n please use the :attr:`~Scatter.apply_transform` method.\n\n '''\n\n transform_inv = ObjectProperty(Matrix())\n '''Inverse of the transformation matrix.\n\n :attr:`transform_inv` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to the identity matrix.\n '''\n\n def _get_bbox(self):\n xmin, ymin = xmax, ymax = self.to_parent(0, 0)\n for point in [(self.width, 0), (0, self.height), self.size]:\n x, y = self.to_parent(*point)\n if x < xmin:\n xmin = x\n if y < ymin:\n ymin = y\n if x > xmax:\n xmax = x\n if y > ymax:\n ymax = y\n return (xmin, ymin), (xmax - xmin, ymax - ymin)\n bbox = AliasProperty(_get_bbox, None, bind=(\n 'transform', 'width', 'height'))\n '''Bounding box of the widget in parent space::\n\n ((x, y), (w, h))\n # x, y = lower left corner\n\n :attr:`bbox` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def _get_rotation(self):\n v1 = Vector(0, 10)\n tp = self.to_parent\n v2 = Vector(*tp(*self.pos)) - tp(self.x, self.y + 10)\n return -1.0 * (v1.angle(v2) + 180) % 360\n\n def _set_rotation(self, rotation):\n angle_change = self.rotation - rotation\n r = Matrix().rotate(-radians(angle_change), 0, 0, 1)\n self.apply_transform(r, post_multiply=True,\n anchor=self.to_local(*self.center))\n rotation = AliasProperty(_get_rotation, _set_rotation, bind=(\n 'x', 'y', 'transform'))\n '''Rotation value of the scatter in degrees moving in a counterclockwise\n direction.\n\n :attr:`rotation` is an :class:`~kivy.properties.AliasProperty` and defaults\n to 0.0.\n '''\n\n def _get_scale(self):\n p1 = Vector(*self.to_parent(0, 0))\n p2 = Vector(*self.to_parent(1, 0))\n scale = p1.distance(p2)\n\n # XXX float calculation are not accurate, and then, scale can be\n # throwed again even with only the position change. So to\n # prevent anything wrong with scale, just avoid to dispatch it\n # if the scale \"visually\" didn't change. 
#947\n # Remove this ugly hack when we'll be Python 3 only.\n if hasattr(self, '_scale_p'):\n if str(scale) == str(self._scale_p):\n return self._scale_p\n\n self._scale_p = scale\n return scale\n\n def _set_scale(self, scale):\n rescale = scale * 1.0 / self.scale\n self.apply_transform(Matrix().scale(rescale, rescale, rescale),\n post_multiply=True,\n anchor=self.to_local(*self.center))\n scale = AliasProperty(_get_scale, _set_scale, bind=('x', 'y', 'transform'))\n '''Scale value of the scatter.\n\n :attr:`scale` is an :class:`~kivy.properties.AliasProperty` and defaults to\n 1.0.\n '''\n\n def _get_center(self):\n return (self.bbox[0][0] + self.bbox[1][0] / 2.0,\n self.bbox[0][1] + self.bbox[1][1] / 2.0)\n\n def _set_center(self, center):\n if center == self.center:\n return False\n t = Vector(*center) - self.center\n trans = Matrix().translate(t.x, t.y, 0)\n self.apply_transform(trans)\n center = AliasProperty(_get_center, _set_center, bind=('bbox', ))\n\n def _get_pos(self):\n return self.bbox[0]\n\n def _set_pos(self, pos):\n _pos = self.bbox[0]\n if pos == _pos:\n return\n t = Vector(*pos) - _pos\n trans = Matrix().translate(t.x, t.y, 0)\n self.apply_transform(trans)\n pos = AliasProperty(_get_pos, _set_pos, bind=('bbox', ))\n\n def _get_x(self):\n return self.bbox[0][0]\n\n def _set_x(self, x):\n if x == self.bbox[0][0]:\n return False\n self.pos = (x, self.y)\n return True\n x = AliasProperty(_get_x, _set_x, bind=('bbox', ))\n\n def _get_y(self):\n return self.bbox[0][1]\n\n def _set_y(self, y):\n if y == self.bbox[0][1]:\n return False\n self.pos = (self.x, y)\n return True\n y = AliasProperty(_get_y, _set_y, bind=('bbox', ))\n\n def get_right(self):\n return self.x + self.bbox[1][0]\n\n def set_right(self, value):\n self.x = value - self.bbox[1][0]\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n\n def get_top(self):\n return self.y + self.bbox[1][1]\n\n def set_top(self, value):\n self.y = value - self.bbox[1][1]\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n\n def get_center_x(self):\n return self.x + self.bbox[1][0] / 2.\n\n def set_center_x(self, value):\n self.x = value - self.bbox[1][0] / 2.\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n\n def get_center_y(self):\n return self.y + self.bbox[1][1] / 2.\n\n def set_center_y(self, value):\n self.y = value - self.bbox[1][1] / 2.\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n\n def __init__(self, **kwargs):\n self._touches = []\n self._last_touch_pos = {}\n super(Scatter, self).__init__(**kwargs)\n\n def on_transform(self, instance, value):\n self.transform_inv = value.inverse()\n\n def collide_point(self, x, y):\n x, y = self.to_local(x, y)\n return 0 <= x <= self.width and 0 <= y <= self.height\n\n def to_parent(self, x, y, **k):\n p = self.transform.transform_point(x, y, 0)\n return (p[0], p[1])\n\n def to_local(self, x, y, **k):\n p = self.transform_inv.transform_point(x, y, 0)\n return (p[0], p[1])\n\n def _apply_transform(self, m, pos=None):\n m = self.transform.multiply(m)\n return super(Scatter, self)._apply_transform(m, (0, 0))\n\n def apply_transform(self, trans, post_multiply=False, anchor=(0, 0)):\n '''\n Transforms the scatter by applying the \"trans\" transformation\n matrix (on top of its current transformation state). 
The resultant\n matrix can be found in the :attr:`~Scatter.transform` property.\n\n :Parameters:\n `trans`: :class:`~kivy.graphics.transformation.Matrix`.\n Transformation matix to be applied to the scatter widget.\n `anchor`: tuple, defaults to (0, 0).\n The point to use as the origin of the transformation\n (uses local widget space).\n `post_multiply`: bool, defaults to False.\n If True, the transform matrix is post multiplied\n (as if applied before the current transform).\n\n Usage example::\n\n from kivy.graphics.transformation import Matrix\n mat = Matrix().scale(3, 3, 3)\n scatter_instance.apply_transform(mat)\n\n '''\n t = Matrix().translate(anchor[0], anchor[1], 0)\n t = t.multiply(trans)\n t = t.multiply(Matrix().translate(-anchor[0], -anchor[1], 0))\n\n if post_multiply:\n self.transform = self.transform.multiply(t)\n else:\n self.transform = t.multiply(self.transform)\n\n def transform_with_touch(self, touch):\n # just do a simple one finger drag\n changed = False\n if len(self._touches) == self.translation_touches:\n # _last_touch_pos has last pos in correct parent space,\n # just like incoming touch\n dx = (touch.x - self._last_touch_pos[touch][0]) \\\n * self.do_translation_x\n dy = (touch.y - self._last_touch_pos[touch][1]) \\\n * self.do_translation_y\n dx = dx / self.translation_touches\n dy = dy / self.translation_touches\n self.apply_transform(Matrix().translate(dx, dy, 0))\n changed = True\n\n if len(self._touches) == 1:\n return changed\n\n # we have more than one touch... list of last known pos\n points = [Vector(self._last_touch_pos[t]) for t in self._touches\n if t is not touch]\n # add current touch last\n points.append(Vector(touch.pos))\n\n # we only want to transform if the touch is part of the two touches\n # farthest apart! So first we find anchor, the point to transform\n # around as another touch farthest away from current touch's pos\n anchor = max(points[:-1], key=lambda p: p.distance(touch.pos))\n\n # now we find the touch farthest away from anchor, if its not the\n # same as touch. 
Touch is not one of the two touches used to transform\n farthest = max(points, key=anchor.distance)\n if farthest is not points[-1]:\n return changed\n\n # ok, so we have touch, and anchor, so we can actually compute the\n # transformation\n old_line = Vector(*touch.ppos) - anchor\n new_line = Vector(*touch.pos) - anchor\n if not old_line.length(): # div by zero\n return changed\n\n angle = radians(new_line.angle(old_line)) * self.do_rotation\n self.apply_transform(Matrix().rotate(angle, 0, 0, 1), anchor=anchor)\n\n if self.do_scale:\n scale = new_line.length() / old_line.length()\n new_scale = scale * self.scale\n if new_scale < self.scale_min:\n scale = self.scale_min / self.scale\n elif new_scale > self.scale_max:\n scale = self.scale_max / self.scale\n self.apply_transform(Matrix().scale(scale, scale, scale),\n anchor=anchor)\n changed = True\n return changed\n\n def _bring_to_front(self, touch):\n # auto bring to front\n if self.auto_bring_to_front and self.parent:\n parent = self.parent\n if parent.children[0] is self:\n return\n parent.remove_widget(self)\n parent.add_widget(self)\n self.dispatch('on_bring_to_front', touch)\n\n def on_touch_down(self, touch):\n x, y = touch.x, touch.y\n\n # if the touch isnt on the widget we do nothing\n if not self.do_collide_after_children:\n if not self.collide_point(x, y):\n return False\n\n # let the child widgets handle the event if they want\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_down(touch):\n touch.pop()\n self._bring_to_front(touch)\n return True\n touch.pop()\n\n # if our child didn't do anything, and if we don't have any active\n # interaction control, then don't accept the touch.\n if not self.do_translation_x and \\\n not self.do_translation_y and \\\n not self.do_rotation and \\\n not self.do_scale:\n return False\n\n if self.do_collide_after_children:\n if not self.collide_point(x, y):\n return False\n\n if 'multitouch_sim' in touch.profile:\n touch.multitouch_sim = True\n # grab the touch so we get all it later move events for sure\n self._bring_to_front(touch)\n touch.grab(self)\n self._touches.append(touch)\n self._last_touch_pos[touch] = touch.pos\n\n return True\n\n def on_touch_move(self, touch):\n x, y = touch.x, touch.y\n # let the child widgets handle the event if they want\n if self.collide_point(x, y) and not touch.grab_current == self:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_move(touch):\n touch.pop()\n return True\n touch.pop()\n\n # rotate/scale/translate\n if touch in self._touches and touch.grab_current == self:\n if self.transform_with_touch(touch):\n self.dispatch('on_transform_with_touch', touch)\n self._last_touch_pos[touch] = touch.pos\n\n # stop propagating if its within our bounds\n if self.collide_point(x, y):\n return True\n\n def on_transform_with_touch(self, touch):\n '''\n Called when a touch event has transformed the scatter widget.\n By default this does nothing, but can be overriden by derived\n classes that need to react to transformations caused by user\n input.\n\n :Parameters:\n `touch`:\n The touch object which triggered the transformation.\n\n .. versionadded:: 1.8.0\n '''\n pass\n\n def on_bring_to_front(self, touch):\n '''\n Called when a touch event causes the scatter to be brought to the\n front of the parent (only if :attr:`auto_bring_to_front` is True)\n\n :Parameters:\n `touch`:\n The touch object which brought the scatter to front.\n\n .. 
versionadded:: 1.9.0\n '''\n pass\n\n def on_touch_up(self, touch):\n x, y = touch.x, touch.y\n # if the touch isnt on the widget we do nothing, just try children\n if not touch.grab_current == self:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_up(touch):\n touch.pop()\n return True\n touch.pop()\n\n # remove it from our saved touches\n if touch in self._touches and touch.grab_state:\n touch.ungrab(self)\n del self._last_touch_pos[touch]\n self._touches.remove(touch)\n\n # stop propagating if its within our bounds\n if self.collide_point(x, y):\n return True\n\n\nclass ScatterPlane(Scatter):\n '''This is essentially an unbounded Scatter widget. It's a convenience\n class to make it easier to handle infinite planes.\n '''\n\n def __init__(self, **kwargs):\n if 'auto_bring_to_front' not in kwargs:\n self.auto_bring_to_front = False\n super(ScatterPlane, self).__init__(**kwargs)\n\n def collide_point(self, x, y):\n return True\n",
"path": "kivy/uix/scatter.py"
}
] | [
{
"content": "'''\nScatter\n=======\n\n.. image:: images/scatter.gif\n :align: right\n\n:class:`Scatter` is used to build interactive widgets that can be translated,\nrotated and scaled with two or more fingers on a multitouch system.\n\nScatter has its own matrix transformation: the modelview matrix is changed\nbefore the children are drawn and the previous matrix is restored when the\ndrawing is finished. That makes it possible to perform rotation, scaling and\ntranslation over the entire children tree without changing any widget\nproperties. That specific behavior makes the scatter unique, but there are some\nadvantages / constraints that you should consider:\n\n#. The children are positioned relative to the scatter similarly to a\n :mod:`~kivy.uix.relativelayout.RelativeLayout`. So when dragging the\n scatter, the position of the children don't change, only the position of\n the scatter does.\n#. The scatter size has no impact on the size of it's children.\n#. If you want to resize the scatter, use scale, not size (read #2). Scale\n transforms both the scatter and its children, but does not change size.\n#. The scatter is not a layout. You must manage the size of the children\n yourself.\n\nFor touch events, the scatter converts from the parent matrix to the scatter\nmatrix automatically in on_touch_down/move/up events. If you are doing things\nmanually, you will need to use :meth:`~kivy.uix.widget.Widget.to_parent` and\n:meth:`~kivy.uix.widget.Widget.to_local`.\n\nUsage\n-----\n\nBy default, the Scatter does not have a graphical representation: it is a\ncontainer only. The idea is to combine the Scatter with another widget, for\nexample an :class:`~kivy.uix.image.Image`::\n\n scatter = Scatter()\n image = Image(source='sun.jpg')\n scatter.add_widget(image)\n\nControl Interactions\n--------------------\n\nBy default, all interactions are enabled. You can selectively disable\nthem using the do_rotation, do_translation and do_scale properties.\n\nDisable rotation::\n\n scatter = Scatter(do_rotation=False)\n\nAllow only translation::\n\n scatter = Scatter(do_rotation=False, do_scale=False)\n\nAllow only translation on x axis::\n\n scatter = Scatter(do_rotation=False, do_scale=False,\n do_translation_y=False)\n\n\nAutomatic Bring to Front\n------------------------\n\nIf the :attr:`Scatter.auto_bring_to_front` property is True, the scatter\nwidget will be removed and re-added to the parent when it is touched\n(brought to front, above all other widgets in the parent). This is useful\nwhen you are manipulating several scatter widgets and don't want the active\none to be partially hidden.\n\nScale Limitation\n----------------\n\nWe are using a 32-bit matrix in double representation. That means we have\na limit for scaling. You cannot do infinite scaling down/up with our\nimplementation. Generally, you don't hit the minimum scale (because you don't\nsee it on the screen), but the maximum scale is 9.99506983235e+19 (2^66).\n\nYou can also limit the minimum and maximum scale allowed::\n\n scatter = Scatter(scale_min=.5, scale_max=3.)\n\nBehavior\n--------\n\n.. 
versionchanged:: 1.1.0\n If no control interactions are enabled, then the touch handler will never\n return True.\n\n'''\n\n__all__ = ('Scatter', 'ScatterPlane')\n\nfrom math import radians\nfrom kivy.properties import BooleanProperty, AliasProperty, \\\n NumericProperty, ObjectProperty, BoundedNumericProperty\nfrom kivy.vector import Vector\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics.transformation import Matrix\n\n\nclass Scatter(Widget):\n '''Scatter class. See module documentation for more information.\n\n :Events:\n `on_transform_with_touch`:\n Fired when the scatter has been transformed by user touch\n or multitouch, such as panning or zooming.\n `on_bring_to_front`:\n Fired when the scatter is brought to the front.\n\n .. versionchanged:: 1.9.0\n Event `on_bring_to_front` added.\n\n .. versionchanged:: 1.8.0\n Event `on_transform_with_touch` added.\n '''\n\n __events__ = ('on_transform_with_touch', 'on_bring_to_front')\n\n auto_bring_to_front = BooleanProperty(True)\n '''If True, the widget will be automatically pushed on the top of parent\n widget list for drawing.\n\n :attr:`auto_bring_to_front` is a :class:`~kivy.properties.BooleanProperty`\n and defaults to True.\n '''\n\n do_translation_x = BooleanProperty(True)\n '''Allow translation on the X axis.\n\n :attr:`do_translation_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_translation_y = BooleanProperty(True)\n '''Allow translation on Y axis.\n\n :attr:`do_translation_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_translation(self):\n return (self.do_translation_x, self.do_translation_y)\n\n def _set_do_translation(self, value):\n if type(value) in (list, tuple):\n self.do_translation_x, self.do_translation_y = value\n else:\n self.do_translation_x = self.do_translation_y = bool(value)\n do_translation = AliasProperty(\n _get_do_translation, _set_do_translation,\n bind=('do_translation_x', 'do_translation_y'))\n '''Allow translation on the X or Y axis.\n\n :attr:`do_translation` is an :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_translation_x` + :attr:`do_translation_y`)\n '''\n\n translation_touches = BoundedNumericProperty(1, min=1)\n '''Determine whether translation was triggered by a single or multiple\n touches. This only has effect when :attr:`do_translation` = True.\n\n :attr:`translation_touches` is a :class:`~kivy.properties.NumericProperty`\n and defaults to 1.\n\n .. versionadded:: 1.7.0\n '''\n\n do_rotation = BooleanProperty(True)\n '''Allow rotation.\n\n :attr:`do_rotation` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scale = BooleanProperty(True)\n '''Allow scaling.\n\n :attr:`do_scale` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_collide_after_children = BooleanProperty(False)\n '''If True, the collision detection for limiting the touch inside the\n scatter will be done after dispaching the touch to the children.\n You can put children outside the bounding box of the scatter and still be\n able to touch them.\n\n :attr:`do_collide_after_children` is a\n :class:`~kivy.properties.BooleanProperty` and defaults to False.\n\n .. 
versionadded:: 1.3.0\n '''\n\n scale_min = NumericProperty(0.01)\n '''Minimum scaling factor allowed.\n\n :attr:`scale_min` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.01.\n '''\n\n scale_max = NumericProperty(1e20)\n '''Maximum scaling factor allowed.\n\n :attr:`scale_max` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1e20.\n '''\n\n transform = ObjectProperty(Matrix())\n '''Transformation matrix.\n\n :attr:`transform` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to the identity matrix.\n\n .. note::\n\n This matrix reflects the current state of the transformation matrix\n but setting it directly will erase previously applied\n transformations. To apply a transformation considering context,\n please use the :attr:`~Scatter.apply_transform` method.\n\n '''\n\n transform_inv = ObjectProperty(Matrix())\n '''Inverse of the transformation matrix.\n\n :attr:`transform_inv` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to the identity matrix.\n '''\n\n def _get_bbox(self):\n xmin, ymin = xmax, ymax = self.to_parent(0, 0)\n for point in [(self.width, 0), (0, self.height), self.size]:\n x, y = self.to_parent(*point)\n if x < xmin:\n xmin = x\n if y < ymin:\n ymin = y\n if x > xmax:\n xmax = x\n if y > ymax:\n ymax = y\n return (xmin, ymin), (xmax - xmin, ymax - ymin)\n bbox = AliasProperty(_get_bbox, None, bind=(\n 'transform', 'width', 'height'))\n '''Bounding box of the widget in parent space::\n\n ((x, y), (w, h))\n # x, y = lower left corner\n\n :attr:`bbox` is an :class:`~kivy.properties.AliasProperty`.\n '''\n\n def _get_rotation(self):\n v1 = Vector(0, 10)\n tp = self.to_parent\n v2 = Vector(*tp(*self.pos)) - tp(self.x, self.y + 10)\n return -1.0 * (v1.angle(v2) + 180) % 360\n\n def _set_rotation(self, rotation):\n angle_change = self.rotation - rotation\n r = Matrix().rotate(-radians(angle_change), 0, 0, 1)\n self.apply_transform(r, post_multiply=True,\n anchor=self.to_local(*self.center))\n rotation = AliasProperty(_get_rotation, _set_rotation, bind=(\n 'x', 'y', 'transform'))\n '''Rotation value of the scatter in degrees moving in a counterclockwise\n direction.\n\n :attr:`rotation` is an :class:`~kivy.properties.AliasProperty` and defaults\n to 0.0.\n '''\n\n def _get_scale(self):\n p1 = Vector(*self.to_parent(0, 0))\n p2 = Vector(*self.to_parent(1, 0))\n scale = p1.distance(p2)\n\n # XXX float calculation are not accurate, and then, scale can be\n # throwed again even with only the position change. So to\n # prevent anything wrong with scale, just avoid to dispatch it\n # if the scale \"visually\" didn't change. 
#947\n # Remove this ugly hack when we'll be Python 3 only.\n if hasattr(self, '_scale_p'):\n if str(scale) == str(self._scale_p):\n return self._scale_p\n\n self._scale_p = scale\n return scale\n\n def _set_scale(self, scale):\n rescale = scale * 1.0 / self.scale\n self.apply_transform(Matrix().scale(rescale, rescale, rescale),\n post_multiply=True,\n anchor=self.to_local(*self.center))\n scale = AliasProperty(_get_scale, _set_scale, bind=('x', 'y', 'transform'))\n '''Scale value of the scatter.\n\n :attr:`scale` is an :class:`~kivy.properties.AliasProperty` and defaults to\n 1.0.\n '''\n\n def _get_center(self):\n return (self.bbox[0][0] + self.bbox[1][0] / 2.0,\n self.bbox[0][1] + self.bbox[1][1] / 2.0)\n\n def _set_center(self, center):\n if center == self.center:\n return False\n t = Vector(*center) - self.center\n trans = Matrix().translate(t.x, t.y, 0)\n self.apply_transform(trans)\n center = AliasProperty(_get_center, _set_center, bind=('bbox', ))\n\n def _get_pos(self):\n return self.bbox[0]\n\n def _set_pos(self, pos):\n _pos = self.bbox[0]\n if pos == _pos:\n return\n t = Vector(*pos) - _pos\n trans = Matrix().translate(t.x, t.y, 0)\n self.apply_transform(trans)\n pos = AliasProperty(_get_pos, _set_pos, bind=('bbox', ))\n\n def _get_x(self):\n return self.bbox[0][0]\n\n def _set_x(self, x):\n if x == self.bbox[0][0]:\n return False\n self.pos = (x, self.y)\n return True\n x = AliasProperty(_get_x, _set_x, bind=('bbox', ))\n\n def _get_y(self):\n return self.bbox[0][1]\n\n def _set_y(self, y):\n if y == self.bbox[0][1]:\n return False\n self.pos = (self.x, y)\n return True\n y = AliasProperty(_get_y, _set_y, bind=('bbox', ))\n\n def get_right(self):\n return self.x + self.bbox[1][0]\n\n def set_right(self, value):\n self.x = value - self.bbox[1][0]\n\n right = AliasProperty(get_right, set_right, bind=('x', 'width'))\n\n def get_top(self):\n return self.y + self.bbox[1][1]\n\n def set_top(self, value):\n self.y = value - self.bbox[1][1]\n\n top = AliasProperty(get_top, set_top, bind=('y', 'height'))\n\n def get_center_x(self):\n return self.x + self.bbox[1][0] / 2.\n\n def set_center_x(self, value):\n self.x = value - self.bbox[1][0] / 2.\n center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))\n\n def get_center_y(self):\n return self.y + self.bbox[1][1] / 2.\n\n def set_center_y(self, value):\n self.y = value - self.bbox[1][1] / 2.\n center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))\n\n def __init__(self, **kwargs):\n self._touches = []\n self._last_touch_pos = {}\n super(Scatter, self).__init__(**kwargs)\n\n def on_transform(self, instance, value):\n self.transform_inv = value.inverse()\n\n def collide_point(self, x, y):\n x, y = self.to_local(x, y)\n return 0 <= x <= self.width and 0 <= y <= self.height\n\n def to_parent(self, x, y, **k):\n p = self.transform.transform_point(x, y, 0)\n return (p[0], p[1])\n\n def to_local(self, x, y, **k):\n p = self.transform_inv.transform_point(x, y, 0)\n return (p[0], p[1])\n\n def _apply_transform(self, m, pos=None):\n m = self.transform.multiply(m)\n return super(Scatter, self)._apply_transform(m, (0, 0))\n\n def apply_transform(self, trans, post_multiply=False, anchor=(0, 0)):\n '''\n Transforms the scatter by applying the \"trans\" transformation\n matrix (on top of its current transformation state). 
The resultant\n matrix can be found in the :attr:`~Scatter.transform` property.\n\n :Parameters:\n `trans`: :class:`~kivy.graphics.transformation.Matrix`.\n Transformation matix to be applied to the scatter widget.\n `anchor`: tuple, defaults to (0, 0).\n The point to use as the origin of the transformation\n (uses local widget space).\n `post_multiply`: bool, defaults to False.\n If True, the transform matrix is post multiplied\n (as if applied before the current transform).\n\n Usage example::\n\n from kivy.graphics.transformation import Matrix\n mat = Matrix().scale(3, 3, 3)\n scatter_instance.apply_transform(mat)\n\n '''\n t = Matrix().translate(anchor[0], anchor[1], 0)\n t = t.multiply(trans)\n t = t.multiply(Matrix().translate(-anchor[0], -anchor[1], 0))\n\n if post_multiply:\n self.transform = self.transform.multiply(t)\n else:\n self.transform = t.multiply(self.transform)\n\n def transform_with_touch(self, touch):\n # just do a simple one finger drag\n changed = False\n if len(self._touches) == self.translation_touches:\n # _last_touch_pos has last pos in correct parent space,\n # just like incoming touch\n dx = (touch.x - self._last_touch_pos[touch][0]) \\\n * self.do_translation_x\n dy = (touch.y - self._last_touch_pos[touch][1]) \\\n * self.do_translation_y\n dx = dx / self.translation_touches\n dy = dy / self.translation_touches\n self.apply_transform(Matrix().translate(dx, dy, 0))\n changed = True\n\n if len(self._touches) == 1:\n return changed\n\n # we have more than one touch... list of last known pos\n points = [Vector(self._last_touch_pos[t]) for t in self._touches\n if t is not touch]\n # add current touch last\n points.append(Vector(touch.pos))\n\n # we only want to transform if the touch is part of the two touches\n # farthest apart! So first we find anchor, the point to transform\n # around as another touch farthest away from current touch's pos\n anchor = max(points[:-1], key=lambda p: p.distance(touch.pos))\n\n # now we find the touch farthest away from anchor, if its not the\n # same as touch. 
Touch is not one of the two touches used to transform\n farthest = max(points, key=anchor.distance)\n if farthest is not points[-1]:\n return changed\n\n # ok, so we have touch, and anchor, so we can actually compute the\n # transformation\n old_line = Vector(*touch.ppos) - anchor\n new_line = Vector(*touch.pos) - anchor\n if not old_line.length(): # div by zero\n return changed\n\n angle = radians(new_line.angle(old_line)) * self.do_rotation\n if angle:\n changed = True\n self.apply_transform(Matrix().rotate(angle, 0, 0, 1), anchor=anchor)\n\n if self.do_scale:\n scale = new_line.length() / old_line.length()\n new_scale = scale * self.scale\n if new_scale < self.scale_min:\n scale = self.scale_min / self.scale\n elif new_scale > self.scale_max:\n scale = self.scale_max / self.scale\n self.apply_transform(Matrix().scale(scale, scale, scale),\n anchor=anchor)\n changed = True\n return changed\n\n def _bring_to_front(self, touch):\n # auto bring to front\n if self.auto_bring_to_front and self.parent:\n parent = self.parent\n if parent.children[0] is self:\n return\n parent.remove_widget(self)\n parent.add_widget(self)\n self.dispatch('on_bring_to_front', touch)\n\n def on_touch_down(self, touch):\n x, y = touch.x, touch.y\n\n # if the touch isnt on the widget we do nothing\n if not self.do_collide_after_children:\n if not self.collide_point(x, y):\n return False\n\n # let the child widgets handle the event if they want\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_down(touch):\n touch.pop()\n self._bring_to_front(touch)\n return True\n touch.pop()\n\n # if our child didn't do anything, and if we don't have any active\n # interaction control, then don't accept the touch.\n if not self.do_translation_x and \\\n not self.do_translation_y and \\\n not self.do_rotation and \\\n not self.do_scale:\n return False\n\n if self.do_collide_after_children:\n if not self.collide_point(x, y):\n return False\n\n if 'multitouch_sim' in touch.profile:\n touch.multitouch_sim = True\n # grab the touch so we get all it later move events for sure\n self._bring_to_front(touch)\n touch.grab(self)\n self._touches.append(touch)\n self._last_touch_pos[touch] = touch.pos\n\n return True\n\n def on_touch_move(self, touch):\n x, y = touch.x, touch.y\n # let the child widgets handle the event if they want\n if self.collide_point(x, y) and not touch.grab_current == self:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_move(touch):\n touch.pop()\n return True\n touch.pop()\n\n # rotate/scale/translate\n if touch in self._touches and touch.grab_current == self:\n if self.transform_with_touch(touch):\n self.dispatch('on_transform_with_touch', touch)\n self._last_touch_pos[touch] = touch.pos\n\n # stop propagating if its within our bounds\n if self.collide_point(x, y):\n return True\n\n def on_transform_with_touch(self, touch):\n '''\n Called when a touch event has transformed the scatter widget.\n By default this does nothing, but can be overriden by derived\n classes that need to react to transformations caused by user\n input.\n\n :Parameters:\n `touch`:\n The touch object which triggered the transformation.\n\n .. versionadded:: 1.8.0\n '''\n pass\n\n def on_bring_to_front(self, touch):\n '''\n Called when a touch event causes the scatter to be brought to the\n front of the parent (only if :attr:`auto_bring_to_front` is True)\n\n :Parameters:\n `touch`:\n The touch object which brought the scatter to front.\n\n .. 
versionadded:: 1.9.0\n '''\n pass\n\n def on_touch_up(self, touch):\n x, y = touch.x, touch.y\n # if the touch isnt on the widget we do nothing, just try children\n if not touch.grab_current == self:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(Scatter, self).on_touch_up(touch):\n touch.pop()\n return True\n touch.pop()\n\n # remove it from our saved touches\n if touch in self._touches and touch.grab_state:\n touch.ungrab(self)\n del self._last_touch_pos[touch]\n self._touches.remove(touch)\n\n # stop propagating if its within our bounds\n if self.collide_point(x, y):\n return True\n\n\nclass ScatterPlane(Scatter):\n '''This is essentially an unbounded Scatter widget. It's a convenience\n class to make it easier to handle infinite planes.\n '''\n\n def __init__(self, **kwargs):\n if 'auto_bring_to_front' not in kwargs:\n self.auto_bring_to_front = False\n super(ScatterPlane, self).__init__(**kwargs)\n\n def collide_point(self, x, y):\n return True\n",
"path": "kivy/uix/scatter.py"
}
] | diff --git a/kivy/uix/scatter.py b/kivy/uix/scatter.py
index b8a4b65982..85b9eee8ab 100644
--- a/kivy/uix/scatter.py
+++ b/kivy/uix/scatter.py
@@ -476,6 +476,8 @@ def transform_with_touch(self, touch):
return changed
angle = radians(new_line.angle(old_line)) * self.do_rotation
+ if angle:
+ changed = True
self.apply_transform(Matrix().rotate(angle, 0, 0, 1), anchor=anchor)
if self.do_scale:
|
mesonbuild__meson-1538 | VS 2017 backend emits bad WindowsTargetPlatformVersion value
When I tried generating a VS 2017 solution, the generated app.vcxproj contained this:
```
<WindowsTargetPlatformVersion>10.0.14393.0\</WindowsTargetPlatformVersion>
```
This then causes errors in other `.targets` files that attempt a numeric comparison against that value.
This value is probably taken straight from one of these environment variables:
```
WindowsSDKLibVersion=10.0.14393.0\
WindowsSDKVersion=10.0.14393.0\
```
The trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to
```
WindowsSdkDir=C:\Program Files (x86)\Windows Kits\10\
```
directly.
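For illustration, a minimal Python sketch of how the value could be sanitized before it is written into the project file (reading and stripping the variable this way is an assumption about the backend, not a statement of its actual code):
```python
import os

# Strip the trailing backslash the VS command prompt leaves on the value,
# e.g. '10.0.14393.0\' -> '10.0.14393.0'.
sdk_version = os.getenv('WindowsSDKVersion')
if sdk_version is not None:
    sdk_version = sdk_version.rstrip('\\')
print(sdk_version)
```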
| [
{
"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n",
"path": "mesonbuild/backend/vs2017backend.py"
}
] | [
{
"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n",
"path": "mesonbuild/backend/vs2017backend.py"
}
] | diff --git a/authors.txt b/authors.txt
index 64792a20cc0c..0c575e723f0d 100644
--- a/authors.txt
+++ b/authors.txt
@@ -72,3 +72,4 @@ Aaron Small
Joe Baldino
Peter Harris
Roger Boerdijk
+melak47
diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py
index 8301790de2a2..35d56f3c3b32 100644
--- a/mesonbuild/backend/vs2017backend.py
+++ b/mesonbuild/backend/vs2017backend.py
@@ -24,4 +24,4 @@ def __init__(self, build):
self.platform_toolset = 'v141'
self.vs_version = '2017'
# WindowsSDKVersion should be set by command prompt.
- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)
+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\')
|
encode__starlette-195 | Check directory exists when instantiating `StaticFiles`
The `StaticFiles` application should ensure that the directory exists at the point it is instantiated.
(With an optional switch to turn this behavior off)
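A minimal sketch of one way this could look; the `check_dir` switch name is illustrative, not a confirmed API:
```python
import os

class StaticFiles:
    def __init__(self, *, directory: str, check_dir: bool = True) -> None:
        # Fail loudly at instantiation instead of returning 404s later;
        # check_dir=False would restore the old lazy behaviour.
        if check_dir and not os.path.isdir(directory):
            raise RuntimeError("Directory '%s' does not exist" % directory)
        self.directory = directory
```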
| [
{
"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n",
"path": "starlette/staticfiles.py"
}
] | [
{
"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n if check_dir and not os.path.isdir(directory):\n raise RuntimeError(\"Directory '%s' does not exist\" % directory)\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n",
"path": "starlette/staticfiles.py"
}
] | diff --git a/docs/staticfiles.md b/docs/staticfiles.md
index 448cc3b92..cc8db5e79 100644
--- a/docs/staticfiles.md
+++ b/docs/staticfiles.md
@@ -1,7 +1,12 @@
-Starlette also includes a `StaticFiles` class for serving a specific directory:
+Starlette also includes a `StaticFiles` class for serving files in a given directory:
-* `StaticFiles(directory)` - Serve any files in the given `directory`.
+### StaticFiles
+
+Signature: `StaticFiles(directory, check_dir=True)`
+
+* `directory` - A string denoting the directory path
+* `check_dir` - Ensure that the directory exists upon instantiation. Defaults to `True`
You can combine this ASGI application with Starlette's routing to provide
comprehensive static file serving.
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
index 8a7527a61..ec2418ef7 100644
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -8,7 +8,9 @@
class StaticFiles:
- def __init__(self, *, directory: str) -> None:
+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:
+ if check_dir and not os.path.isdir(directory):
+ raise RuntimeError("Directory '%s' does not exist" % directory)
self.directory = directory
self.config_checked = False
diff --git a/tests/test_staticfiles.py b/tests/test_staticfiles.py
index 9d12de894..84f33682d 100644
--- a/tests/test_staticfiles.py
+++ b/tests/test_staticfiles.py
@@ -54,9 +54,16 @@ def test_staticfiles_with_missing_file_returns_404(tmpdir):
assert response.text == "Not Found"
+def test_staticfiles_instantiated_with_missing_directory(tmpdir):
+ with pytest.raises(RuntimeError) as exc:
+ path = os.path.join(tmpdir, "no_such_directory")
+ app = StaticFiles(directory=path)
+ assert "does not exist" in str(exc)
+
+
def test_staticfiles_configured_with_missing_directory(tmpdir):
path = os.path.join(tmpdir, "no_such_directory")
- app = StaticFiles(directory=path)
+ app = StaticFiles(directory=path, check_dir=False)
client = TestClient(app)
with pytest.raises(RuntimeError) as exc:
client.get("/example.txt")
@@ -68,7 +75,7 @@ def test_staticfiles_configured_with_file_instead_of_directory(tmpdir):
with open(path, "w") as file:
file.write("<file content>")
- app = StaticFiles(directory=path)
+ app = StaticFiles(directory=path, check_dir=False)
client = TestClient(app)
with pytest.raises(RuntimeError) as exc:
client.get("/example.txt")
|
netbox-community__netbox-15136 | API call to add VPN tunnel fails: group field is required
### Deployment Type
Self-hosted
### NetBox Version
v3.7.2
### Python Version
3.11
### Steps to Reproduce
```
$ curl -s -i http://netbox-test.lein.io/api/vpn/tunnels/ \
-H "Authorization: Token 176d4c04ccc8f2a549ea6fd393567d9da5a796ff" \
-H "Content-type: application/json" \
-H "Accept: application/json; indent=4" \
-d '{"name":"TestTunnel", "encapsulation":"ipsec-tunnel", "status":"active"}'
```
### Expected Behavior
Tunnel "TestTunnel" is added successfully.
### Observed Behavior
```
HTTP/1.1 400 Bad Request
API-Version: 3.7
...
{
"group": [
"This field is required."
]
}
```
Adding the same tunnel in the GUI succeeds (using only those three mandatory fields).
### Workaround
Create a tunnel group like "TEMP", then add `"group":1` (where 1 is the group ID) in the create call, and finally edit the resulting tunnel to remove the TEMP group. A sketch of the workaround follows below.
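A hedged sketch of that workaround using Python's `requests` (the token is a placeholder and the group ID of 1 is an assumption about the temporary group):
```python
import requests

# Assumes a temporary tunnel group was created first and has ID 1.
resp = requests.post(
    "http://netbox-test.lein.io/api/vpn/tunnels/",
    headers={"Authorization": "Token <api-token>",
             "Content-Type": "application/json"},
    json={"name": "TestTunnel", "encapsulation": "ipsec-tunnel",
          "status": "active", "group": 1},
)
print(resp.status_code, resp.json())
```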
| [
{
"content": "from django.contrib.contenttypes.models import ContentType\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom ipam.api.nested_serializers import NestedIPAddressSerializer, NestedRouteTargetSerializer\nfrom ipam.models import RouteTarget\nfrom netbox.api.fields import ChoiceField, ContentTypeField, SerializedPKRelatedField\nfrom netbox.api.serializers import NetBoxModelSerializer\nfrom netbox.constants import NESTED_SERIALIZER_PREFIX\nfrom tenancy.api.nested_serializers import NestedTenantSerializer\nfrom utilities.api import get_serializer_for_model\nfrom vpn.choices import *\nfrom vpn.models import *\nfrom .nested_serializers import *\n\n__all__ = (\n 'IKEPolicySerializer',\n 'IKEProposalSerializer',\n 'IPSecPolicySerializer',\n 'IPSecProfileSerializer',\n 'IPSecProposalSerializer',\n 'L2VPNSerializer',\n 'L2VPNTerminationSerializer',\n 'TunnelGroupSerializer',\n 'TunnelSerializer',\n 'TunnelTerminationSerializer',\n)\n\n\nclass TunnelGroupSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:tunnelgroup-detail')\n tunnel_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = TunnelGroup\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'description', 'tags', 'custom_fields', 'created', 'last_updated',\n 'tunnel_count',\n ]\n\n\nclass TunnelSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunnel-detail'\n )\n status = ChoiceField(\n choices=TunnelStatusChoices\n )\n group = NestedTunnelGroupSerializer()\n encapsulation = ChoiceField(\n choices=TunnelEncapsulationChoices\n )\n ipsec_profile = NestedIPSecProfileSerializer(\n required=False,\n allow_null=True\n )\n tenant = NestedTenantSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = Tunnel\n fields = (\n 'id', 'url', 'display', 'name', 'status', 'group', 'encapsulation', 'ipsec_profile', 'tenant', 'tunnel_id',\n 'description', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass TunnelTerminationSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunneltermination-detail'\n )\n tunnel = NestedTunnelSerializer()\n role = ChoiceField(\n choices=TunnelTerminationRoleChoices\n )\n termination_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n termination = serializers.SerializerMethodField(\n read_only=True\n )\n outside_ip = NestedIPAddressSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = TunnelTermination\n fields = (\n 'id', 'url', 'display', 'tunnel', 'role', 'termination_type', 'termination_id', 'termination', 'outside_ip',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_termination(self, obj):\n serializer = get_serializer_for_model(obj.termination, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(obj.termination, context=context).data\n\n\nclass IKEProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikeproposal-detail'\n )\n authentication_method = ChoiceField(\n choices=AuthenticationMethodChoices\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n group = ChoiceField(\n choices=DHGroupChoices\n 
)\n\n class Meta:\n model = IKEProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'authentication_method', 'encryption_algorithm',\n 'authentication_algorithm', 'group', 'sa_lifetime', 'comments', 'tags', 'custom_fields', 'created',\n 'last_updated',\n )\n\n\nclass IKEPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikepolicy-detail'\n )\n version = ChoiceField(\n choices=IKEVersionChoices\n )\n mode = ChoiceField(\n choices=IKEModeChoices\n )\n proposals = SerializedPKRelatedField(\n queryset=IKEProposal.objects.all(),\n serializer=NestedIKEProposalSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = IKEPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'version', 'mode', 'proposals', 'preshared_key', 'comments',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecproposal-detail'\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n\n class Meta:\n model = IPSecProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'encryption_algorithm', 'authentication_algorithm',\n 'sa_lifetime_seconds', 'sa_lifetime_data', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecpolicy-detail'\n )\n proposals = SerializedPKRelatedField(\n queryset=IPSecProposal.objects.all(),\n serializer=NestedIPSecProposalSerializer,\n required=False,\n many=True\n )\n pfs_group = ChoiceField(\n choices=DHGroupChoices,\n required=False\n )\n\n class Meta:\n model = IPSecPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'proposals', 'pfs_group', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProfileSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecprofile-detail'\n )\n mode = ChoiceField(\n choices=IPSecModeChoices\n )\n ike_policy = NestedIKEPolicySerializer()\n ipsec_policy = NestedIPSecPolicySerializer()\n\n class Meta:\n model = IPSecProfile\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'mode', 'ike_policy', 'ipsec_policy', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\n#\n# L2VPN\n#\n\nclass L2VPNSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpn-detail')\n type = ChoiceField(choices=L2VPNTypeChoices, required=False)\n import_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n export_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n tenant = NestedTenantSerializer(required=False, allow_null=True)\n\n class Meta:\n model = L2VPN\n fields = [\n 'id', 'url', 'display', 'identifier', 'name', 'slug', 'type', 'import_targets', 'export_targets',\n 'description', 'comments', 'tenant', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n\nclass L2VPNTerminationSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpntermination-detail')\n l2vpn 
= NestedL2VPNSerializer()\n assigned_object_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n assigned_object = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = L2VPNTermination\n fields = [\n 'id', 'url', 'display', 'l2vpn', 'assigned_object_type', 'assigned_object_id',\n 'assigned_object', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_assigned_object(self, instance):\n serializer = get_serializer_for_model(instance.assigned_object, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(instance.assigned_object, context=context).data\n",
"path": "netbox/vpn/api/serializers.py"
}
] | [
{
"content": "from django.contrib.contenttypes.models import ContentType\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom ipam.api.nested_serializers import NestedIPAddressSerializer, NestedRouteTargetSerializer\nfrom ipam.models import RouteTarget\nfrom netbox.api.fields import ChoiceField, ContentTypeField, SerializedPKRelatedField\nfrom netbox.api.serializers import NetBoxModelSerializer\nfrom netbox.constants import NESTED_SERIALIZER_PREFIX\nfrom tenancy.api.nested_serializers import NestedTenantSerializer\nfrom utilities.api import get_serializer_for_model\nfrom vpn.choices import *\nfrom vpn.models import *\nfrom .nested_serializers import *\n\n__all__ = (\n 'IKEPolicySerializer',\n 'IKEProposalSerializer',\n 'IPSecPolicySerializer',\n 'IPSecProfileSerializer',\n 'IPSecProposalSerializer',\n 'L2VPNSerializer',\n 'L2VPNTerminationSerializer',\n 'TunnelGroupSerializer',\n 'TunnelSerializer',\n 'TunnelTerminationSerializer',\n)\n\n\nclass TunnelGroupSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:tunnelgroup-detail')\n tunnel_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = TunnelGroup\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'description', 'tags', 'custom_fields', 'created', 'last_updated',\n 'tunnel_count',\n ]\n\n\nclass TunnelSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunnel-detail'\n )\n status = ChoiceField(\n choices=TunnelStatusChoices\n )\n group = NestedTunnelGroupSerializer(\n required=False,\n allow_null=True\n )\n encapsulation = ChoiceField(\n choices=TunnelEncapsulationChoices\n )\n ipsec_profile = NestedIPSecProfileSerializer(\n required=False,\n allow_null=True\n )\n tenant = NestedTenantSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = Tunnel\n fields = (\n 'id', 'url', 'display', 'name', 'status', 'group', 'encapsulation', 'ipsec_profile', 'tenant', 'tunnel_id',\n 'description', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass TunnelTerminationSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunneltermination-detail'\n )\n tunnel = NestedTunnelSerializer()\n role = ChoiceField(\n choices=TunnelTerminationRoleChoices\n )\n termination_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n termination = serializers.SerializerMethodField(\n read_only=True\n )\n outside_ip = NestedIPAddressSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = TunnelTermination\n fields = (\n 'id', 'url', 'display', 'tunnel', 'role', 'termination_type', 'termination_id', 'termination', 'outside_ip',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_termination(self, obj):\n serializer = get_serializer_for_model(obj.termination, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(obj.termination, context=context).data\n\n\nclass IKEProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikeproposal-detail'\n )\n authentication_method = ChoiceField(\n choices=AuthenticationMethodChoices\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n group = 
ChoiceField(\n choices=DHGroupChoices\n )\n\n class Meta:\n model = IKEProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'authentication_method', 'encryption_algorithm',\n 'authentication_algorithm', 'group', 'sa_lifetime', 'comments', 'tags', 'custom_fields', 'created',\n 'last_updated',\n )\n\n\nclass IKEPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikepolicy-detail'\n )\n version = ChoiceField(\n choices=IKEVersionChoices\n )\n mode = ChoiceField(\n choices=IKEModeChoices\n )\n proposals = SerializedPKRelatedField(\n queryset=IKEProposal.objects.all(),\n serializer=NestedIKEProposalSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = IKEPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'version', 'mode', 'proposals', 'preshared_key', 'comments',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecproposal-detail'\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n\n class Meta:\n model = IPSecProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'encryption_algorithm', 'authentication_algorithm',\n 'sa_lifetime_seconds', 'sa_lifetime_data', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecpolicy-detail'\n )\n proposals = SerializedPKRelatedField(\n queryset=IPSecProposal.objects.all(),\n serializer=NestedIPSecProposalSerializer,\n required=False,\n many=True\n )\n pfs_group = ChoiceField(\n choices=DHGroupChoices,\n required=False\n )\n\n class Meta:\n model = IPSecPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'proposals', 'pfs_group', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProfileSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecprofile-detail'\n )\n mode = ChoiceField(\n choices=IPSecModeChoices\n )\n ike_policy = NestedIKEPolicySerializer()\n ipsec_policy = NestedIPSecPolicySerializer()\n\n class Meta:\n model = IPSecProfile\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'mode', 'ike_policy', 'ipsec_policy', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\n#\n# L2VPN\n#\n\nclass L2VPNSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpn-detail')\n type = ChoiceField(choices=L2VPNTypeChoices, required=False)\n import_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n export_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n tenant = NestedTenantSerializer(required=False, allow_null=True)\n\n class Meta:\n model = L2VPN\n fields = [\n 'id', 'url', 'display', 'identifier', 'name', 'slug', 'type', 'import_targets', 'export_targets',\n 'description', 'comments', 'tenant', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n\nclass L2VPNTerminationSerializer(NetBoxModelSerializer):\n url = 
serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpntermination-detail')\n l2vpn = NestedL2VPNSerializer()\n assigned_object_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n assigned_object = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = L2VPNTermination\n fields = [\n 'id', 'url', 'display', 'l2vpn', 'assigned_object_type', 'assigned_object_id',\n 'assigned_object', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_assigned_object(self, instance):\n serializer = get_serializer_for_model(instance.assigned_object, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(instance.assigned_object, context=context).data\n",
"path": "netbox/vpn/api/serializers.py"
}
] | diff --git a/netbox/vpn/api/serializers.py b/netbox/vpn/api/serializers.py
index dedcbfbf5f7..5f6fcd5f771 100644
--- a/netbox/vpn/api/serializers.py
+++ b/netbox/vpn/api/serializers.py
@@ -46,7 +46,10 @@ class TunnelSerializer(NetBoxModelSerializer):
status = ChoiceField(
choices=TunnelStatusChoices
)
- group = NestedTunnelGroupSerializer()
+ group = NestedTunnelGroupSerializer(
+ required=False,
+ allow_null=True
+ )
encapsulation = ChoiceField(
choices=TunnelEncapsulationChoices
)
diff --git a/netbox/vpn/tests/test_api.py b/netbox/vpn/tests/test_api.py
index eb0520c8bff..64c175fe5bb 100644
--- a/netbox/vpn/tests/test_api.py
+++ b/netbox/vpn/tests/test_api.py
@@ -105,7 +105,6 @@ def setUpTestData(cls):
{
'name': 'Tunnel 6',
'status': TunnelStatusChoices.STATUS_DISABLED,
- 'group': tunnel_groups[1].pk,
'encapsulation': TunnelEncapsulationChoices.ENCAP_GRE,
},
]
|
OpenCTI-Platform__connectors-214 | [cybercrime-tracker] connector fails due to unchecked None
## Description
Cybercrime tracker connector fails while determining if the last run interval has been exceeded.
## Environment
1. OS (where OpenCTI server runs): Debian Buster 10.7
2. OpenCTI version: 4.0.x
3. OpenCTI client: pycti
4. Other environment details:
## Reproducible Steps
Steps to create the smallest reproducible scenario:
1. Set up the connector in accordance with its README (docker)
2. `docker-compose up`
3. See "ERROR" logs
```
Attaching to cybercrime-tracker_connector-cybercrimetracker_1
connector-cybercrimetracker_1 | INFO:root:Listing Threat-Actors with filters null.
connector-cybercrimetracker_1 | INFO:root:Connector registered with ID:cybercrime
connector-cybercrimetracker_1 | INFO:root:Starting ping alive thread
connector-cybercrimetracker_1 | INFO:root:Fetching data CYBERCRIME-TRACKER.NET...
connector-cybercrimetracker_1 | INFO:root:Listing Marking-Definitions with filters [{"key": "definition", "values": "TLP:WHITE"}].
connector-cybercrimetracker_1 | INFO:root:Connector last run: 2020-12-21 05:57:36
connector-cybercrimetracker_1 | ERROR:root:'>' not supported between instances of 'int' and 'NoneType'
```
This error seems to occur when [determining if it's time to run](https://github.com/OpenCTI-Platform/connectors/blob/master/cybercrime-tracker/src/cybercrime-tracker.py#L163).
## Expected Output
I expected the Data page to display the number of messages queued from this source.
## Actual Output
0 messages queued
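A minimal, self-contained sketch of the failing comparison; the fallback guard is illustrative only (the actual fix in this row's diff corrects the interval config variable name so `self.interval` is no longer `None`):
```python
import time

# When the interval setting does not resolve, the connector ends up with
# interval = None and the '>' comparison raises:
#   TypeError: '>' not supported between instances of 'int' and 'NoneType'
interval = None
timestamp = int(time.time())
last_run = timestamp - 3600

# Illustrative guard only, not the project's fix:
effective_interval = interval if interval is not None else 86400
should_run = last_run is None or (timestamp - last_run) > effective_interval
print(should_run)
```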
| [
{
"content": "import os\nimport yaml\nimport time\nimport feedparser\nimport stix2\nimport datetime\n\nfrom pycti import OpenCTIConnectorHelper, get_config_variable\nfrom pycti.utils.opencti_stix2_utils import OpenCTIStix2Utils, SimpleObservable\nfrom pygrok import Grok\nfrom urllib.parse import urlparse, quote\n\n\nclass Cybercrimetracker:\n def __init__(self):\n # Instantiate the connector helper from config\n config_file_path = \"{}/config.yml\".format(\n os.path.dirname(os.path.abspath(__file__))\n )\n\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else {}\n )\n self.helper = OpenCTIConnectorHelper(config)\n\n # Connector Config\n self.confidence_level = get_config_variable(\n \"CONNECTOR_CONFIDENCE_LEVEL\",\n [\"connector\", \"confidence_level\"],\n config,\n isNumber=True,\n )\n self.update_existing_data = get_config_variable(\n \"CONNECTOR_UPDATE_EXISTING_DATA\",\n [\"connector\", \"update_existing_data\"],\n config,\n )\n\n # CYBERCRIME-TRACKER.NET Config\n self.feed_url = get_config_variable(\n \"CYBERCRIMET_RACKER_FEED_URL\", [\"cybercrime-tracker\", \"feed_url\"], config\n )\n self.connector_tlp = get_config_variable(\n \"CYBERCRIME_TRACKER_TLP\", [\"cybercrime-tracker\", \"tlp\"], config\n )\n self.create_indicators = get_config_variable(\n \"CYBERCRIME_TRACKER_CREATE_INDICATORS\",\n [\"cybercrime-tracker\", \"create_indicators\"],\n config,\n )\n self.create_observables = get_config_variable(\n \"CYBERCRIME_TRACKER_CREATE_OBSERVABLES\",\n [\"cybercrime-tracker\", \"create_observables\"],\n config,\n )\n self.interval = get_config_variable(\n \"CYBERCRIMETRACKER_INTERVAL\",\n [\"cybercrime-tracker\", \"interval\"],\n config,\n isNumber=True,\n )\n\n @staticmethod\n def _time_to_datetime(input_date: time) -> datetime.datetime:\n return datetime.datetime(\n input_date.tm_year,\n input_date.tm_mon,\n input_date.tm_mday,\n input_date.tm_hour,\n input_date.tm_min,\n input_date.tm_sec,\n tzinfo=datetime.timezone.utc,\n )\n\n def parse_feed_entry(self, entry):\n \"\"\"\n Parses an entry from the feed and returns a dict with:\n\n date: date in iso format\n type: name of the malware associated with the C2 server\n url: the url of the C2\n ip: the IP address of the C2\n ext_link: An external link to CYBERCRIME-TRACKER.NET with details\n\n Note: CYBERCRIME-TRACKER.NET does not provide the protocol in the url\n as such we always assume 'http'.\n \"\"\"\n parsed_entry = {}\n\n pattern = (\n r\"(?:\\[%{GREEDYDATA:cwhqid}\\]\\s+Type:\\s+%{GREEDYDATA:type}\"\n + r\"\\s+-%{GREEDYDATA}:\\s+%{IP:ip}|\"\n + r\"\\[%{GREEDYDATA:cwhqid}\\]\\s+Type:\\s+%{GREEDYDATA:type})\"\n )\n\n entry_summary = Grok(pattern).match(entry[\"summary\"])\n\n if entry_summary:\n parsed_entry[\"date\"] = self._time_to_datetime(entry[\"published_parsed\"])\n parsed_entry[\"type\"] = entry_summary[\"type\"]\n parsed_entry[\"ext_link\"] = entry[\"link\"]\n parsed_entry[\"url\"] = \"http://{}\".format(quote(entry[\"title\"]))\n hostname = urlparse(parsed_entry[\"url\"]).hostname\n\n if entry_summary[\"ip\"] is None:\n parsed_entry[\"ip\"] = hostname\n else:\n parsed_entry[\"ip\"] = entry_summary[\"ip\"]\n parsed_entry[\"domain\"] = hostname\n\n self.helper.log_info(\"Parsed entry: {}\".format(entry[\"title\"]))\n\n return parsed_entry\n else:\n self.helper.log_error(\"Could not parse: {}\".format(entry[\"title\"]))\n return False\n\n def gen_indicator_pattern(self, parsed_entry):\n\n if \"domain\" in parsed_entry.keys():\n indicator_pattern = (\n 
\"[ipv4-addr:value='{}'] \".format(parsed_entry[\"ip\"])\n + \"AND [url:value='{}'] \".format(parsed_entry[\"url\"])\n + \"AND [domain-name:value='{}']\".format(parsed_entry[\"domain\"])\n )\n else:\n indicator_pattern = \"[ipv4-addr:value='{}'] \".format(\n parsed_entry[\"ip\"]\n ) + \"AND [url:value='{}']\".format(parsed_entry[\"url\"])\n\n return indicator_pattern\n\n def run(self):\n self.helper.log_info(\"Fetching data CYBERCRIME-TRACKER.NET...\")\n tlp = self.helper.api.marking_definition.read(\n filters=[\n {\"key\": \"definition\", \"values\": \"TLP:{}\".format(self.connector_tlp)}\n ]\n )\n while True:\n try:\n # Get the current timestamp and check\n timestamp = int(time.time())\n current_state = self.helper.get_state()\n\n if current_state is not None and \"last_run\" in current_state:\n last_run = current_state[\"last_run\"]\n self.helper.log_info(\n \"Connector last run: {}\".format(\n datetime.datetime.utcfromtimestamp(last_run).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n )\n )\n else:\n last_run = None\n self.helper.log_info(\"Connector has never run\")\n\n # Run if it is the first time or we are past the interval\n\n if last_run is None or ((timestamp - last_run) > self.interval):\n self.helper.log_info(\"Connector will run!\")\n now = datetime.datetime.utcfromtimestamp(timestamp)\n friendly_name = \"Cybercrime-Tracker run @ \" + now.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n\n # Get Feed Content\n feed = feedparser.parse(self.feed_url)\n\n self.helper.log_info(\n \"Found: {} entries.\".format(len(feed[\"entries\"]))\n )\n\n self.feed_summary = {\n \"Source\": feed[\"feed\"][\"title\"],\n \"Date\": self._time_to_datetime(\n feed[\"feed\"][\"published_parsed\"]\n ),\n \"Details\": feed[\"feed\"][\"subtitle\"],\n \"Link\": feed[\"feed\"][\"link\"],\n }\n\n # Create the bundle\n bundle_objects = list()\n\n organization = stix2.Identity(\n id=OpenCTIStix2Utils.generate_random_stix_id(\"identity\"),\n name=\"CYBERCRIME-TRACKER.NET\",\n identity_class=\"organization\",\n description=\"Tracker collecting and sharing daily updates of C2 IPs/Urls. 
http://cybercrime-tracker.net\",\n )\n bundle_objects.append(organization)\n for entry in feed[\"entries\"]:\n parsed_entry = self.parse_feed_entry(entry)\n external_reference = stix2.ExternalReference(\n source_name=\"{}\".format(self.feed_summary[\"Source\"]),\n url=parsed_entry[\"ext_link\"],\n )\n indicator_pattern = self.gen_indicator_pattern(parsed_entry)\n malware = stix2.Malware(\n id=OpenCTIStix2Utils.generate_random_stix_id(\"malware\"),\n is_family=True,\n name=parsed_entry[\"type\"],\n description=\"{} malware.\".format(parsed_entry[\"type\"]),\n )\n bundle_objects.append(malware)\n indicator = None\n if self.create_indicators:\n indicator = stix2.Indicator(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"indicator\"\n ),\n name=parsed_entry[\"url\"],\n description=\"C2 URL for: {}\".format(\n parsed_entry[\"type\"]\n ),\n labels=[\"C2 Server\"],\n pattern_type=\"stix\",\n pattern=indicator_pattern,\n valid_from=parsed_entry[\"date\"],\n created=parsed_entry[\"date\"],\n modified=parsed_entry[\"date\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n custom_properties={\n \"x_opencti_main_observable_type\": \"Url\"\n },\n )\n bundle_objects.append(indicator)\n relation = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n source_ref=indicator.id,\n target_ref=malware.id,\n relationship_type=\"indicates\",\n start_time=self._time_to_datetime(\n entry[\"published_parsed\"]\n ),\n stop_time=self._time_to_datetime(\n entry[\"published_parsed\"]\n )\n + datetime.timedelta(0, 3),\n description=\"URLs associated to: \"\n + parsed_entry[\"type\"],\n confidence=self.confidence_level,\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n created=parsed_entry[\"date\"],\n modified=parsed_entry[\"date\"],\n external_references=[external_reference],\n )\n bundle_objects.append(relation)\n if self.create_observables:\n observable_url = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"Url.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"url\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_url)\n observable_ip = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"IPv4-Addr.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"ip\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_ip)\n observable_domain = None\n if \"domain\" in parsed_entry.keys():\n observable_domain = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"Domain-Name.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"domain\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_domain)\n\n if indicator is not None:\n relationship_1 = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_url.id,\n )\n bundle_objects.append(relationship_1)\n relationship_2 = stix2.Relationship(\n 
id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_ip.id,\n )\n bundle_objects.append(relationship_2)\n if observable_domain is not None:\n relationship_3 = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_domain.id,\n )\n bundle_objects.append(relationship_3)\n\n # create stix bundle\n bundle = stix2.Bundle(objects=bundle_objects)\n # send data\n self.helper.send_stix2_bundle(\n bundle=bundle.serialize(),\n update=self.update_existing_data,\n work_id=work_id,\n )\n\n # Store the current timestamp as a last run\n message = (\n \"Connector successfully run, storing last_run as: {}\".format(\n str(timestamp)\n )\n )\n self.helper.log_info(message)\n self.helper.set_state({\"last_run\": timestamp})\n self.helper.api.work.to_processed(work_id, message)\n self.helper.log_info(\n \"Last_run stored, next run in: {} seconds.\".format(\n str(round(self.interval, 2))\n )\n )\n time.sleep(60)\n else:\n new_interval = self.interval - (timestamp - last_run)\n self.helper.log_info(\n \"Connector will not run. \\\n Next run in: {} seconds.\".format(\n str(round(new_interval, 2))\n )\n )\n time.sleep(60)\n\n except (KeyboardInterrupt, SystemExit):\n self.helper.log_info(\"Connector stop\")\n exit(0)\n except Exception as e:\n self.helper.log_error(str(e))\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n try:\n cybercrimetrackerConnector = Cybercrimetracker()\n cybercrimetrackerConnector.run()\n except Exception as e:\n print(e)\n time.sleep(10)\n exit(0)\n",
"path": "cybercrime-tracker/src/cybercrime-tracker.py"
}
] | [
{
"content": "import os\nimport yaml\nimport time\nimport feedparser\nimport stix2\nimport datetime\n\nfrom pycti import OpenCTIConnectorHelper, get_config_variable\nfrom pycti.utils.opencti_stix2_utils import OpenCTIStix2Utils, SimpleObservable\nfrom pygrok import Grok\nfrom urllib.parse import urlparse, quote\n\n\nclass Cybercrimetracker:\n def __init__(self):\n # Instantiate the connector helper from config\n config_file_path = \"{}/config.yml\".format(\n os.path.dirname(os.path.abspath(__file__))\n )\n\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else {}\n )\n self.helper = OpenCTIConnectorHelper(config)\n\n # Connector Config\n self.confidence_level = get_config_variable(\n \"CONNECTOR_CONFIDENCE_LEVEL\",\n [\"connector\", \"confidence_level\"],\n config,\n isNumber=True,\n )\n self.update_existing_data = get_config_variable(\n \"CONNECTOR_UPDATE_EXISTING_DATA\",\n [\"connector\", \"update_existing_data\"],\n config,\n )\n\n # CYBERCRIME-TRACKER.NET Config\n self.feed_url = get_config_variable(\n \"CYBERCRIMET_RACKER_FEED_URL\", [\"cybercrime-tracker\", \"feed_url\"], config\n )\n self.connector_tlp = get_config_variable(\n \"CYBERCRIME_TRACKER_TLP\", [\"cybercrime-tracker\", \"tlp\"], config\n )\n self.create_indicators = get_config_variable(\n \"CYBERCRIME_TRACKER_CREATE_INDICATORS\",\n [\"cybercrime-tracker\", \"create_indicators\"],\n config,\n )\n self.create_observables = get_config_variable(\n \"CYBERCRIME_TRACKER_CREATE_OBSERVABLES\",\n [\"cybercrime-tracker\", \"create_observables\"],\n config,\n )\n self.interval = get_config_variable(\n \"CYBERCRIME_TRACKER_INTERVAL\",\n [\"cybercrime-tracker\", \"interval\"],\n config,\n isNumber=True,\n )\n\n @staticmethod\n def _time_to_datetime(input_date: time) -> datetime.datetime:\n return datetime.datetime(\n input_date.tm_year,\n input_date.tm_mon,\n input_date.tm_mday,\n input_date.tm_hour,\n input_date.tm_min,\n input_date.tm_sec,\n tzinfo=datetime.timezone.utc,\n )\n\n def parse_feed_entry(self, entry):\n \"\"\"\n Parses an entry from the feed and returns a dict with:\n\n date: date in iso format\n type: name of the malware associated with the C2 server\n url: the url of the C2\n ip: the IP address of the C2\n ext_link: An external link to CYBERCRIME-TRACKER.NET with details\n\n Note: CYBERCRIME-TRACKER.NET does not provide the protocol in the url\n as such we always assume 'http'.\n \"\"\"\n parsed_entry = {}\n\n pattern = (\n r\"(?:\\[%{GREEDYDATA:cwhqid}\\]\\s+Type:\\s+%{GREEDYDATA:type}\"\n + r\"\\s+-%{GREEDYDATA}:\\s+%{IP:ip}|\"\n + r\"\\[%{GREEDYDATA:cwhqid}\\]\\s+Type:\\s+%{GREEDYDATA:type})\"\n )\n\n entry_summary = Grok(pattern).match(entry[\"summary\"])\n\n if entry_summary:\n parsed_entry[\"date\"] = self._time_to_datetime(entry[\"published_parsed\"])\n parsed_entry[\"type\"] = entry_summary[\"type\"]\n parsed_entry[\"ext_link\"] = entry[\"link\"]\n parsed_entry[\"url\"] = \"http://{}\".format(quote(entry[\"title\"]))\n hostname = urlparse(parsed_entry[\"url\"]).hostname\n\n if entry_summary[\"ip\"] is None:\n parsed_entry[\"ip\"] = hostname\n else:\n parsed_entry[\"ip\"] = entry_summary[\"ip\"]\n parsed_entry[\"domain\"] = hostname\n\n self.helper.log_info(\"Parsed entry: {}\".format(entry[\"title\"]))\n\n return parsed_entry\n else:\n self.helper.log_error(\"Could not parse: {}\".format(entry[\"title\"]))\n return False\n\n def gen_indicator_pattern(self, parsed_entry):\n\n if \"domain\" in parsed_entry.keys():\n indicator_pattern = (\n 
\"[ipv4-addr:value='{}'] \".format(parsed_entry[\"ip\"])\n + \"AND [url:value='{}'] \".format(parsed_entry[\"url\"])\n + \"AND [domain-name:value='{}']\".format(parsed_entry[\"domain\"])\n )\n else:\n indicator_pattern = \"[ipv4-addr:value='{}'] \".format(\n parsed_entry[\"ip\"]\n ) + \"AND [url:value='{}']\".format(parsed_entry[\"url\"])\n\n return indicator_pattern\n\n def run(self):\n self.helper.log_info(\"Fetching data CYBERCRIME-TRACKER.NET...\")\n tlp = self.helper.api.marking_definition.read(\n filters=[\n {\"key\": \"definition\", \"values\": \"TLP:{}\".format(self.connector_tlp)}\n ]\n )\n while True:\n try:\n # Get the current timestamp and check\n timestamp = int(time.time())\n current_state = self.helper.get_state()\n\n if current_state is not None and \"last_run\" in current_state:\n last_run = current_state[\"last_run\"]\n self.helper.log_info(\n \"Connector last run: {}\".format(\n datetime.datetime.utcfromtimestamp(last_run).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n )\n )\n else:\n last_run = None\n self.helper.log_info(\"Connector has never run\")\n\n # Run if it is the first time or we are past the interval\n\n if last_run is None or ((timestamp - last_run) > self.interval):\n self.helper.log_info(\"Connector will run!\")\n now = datetime.datetime.utcfromtimestamp(timestamp)\n friendly_name = \"Cybercrime-Tracker run @ \" + now.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n\n # Get Feed Content\n feed = feedparser.parse(self.feed_url)\n\n self.helper.log_info(\n \"Found: {} entries.\".format(len(feed[\"entries\"]))\n )\n\n self.feed_summary = {\n \"Source\": feed[\"feed\"][\"title\"],\n \"Date\": self._time_to_datetime(\n feed[\"feed\"][\"published_parsed\"]\n ),\n \"Details\": feed[\"feed\"][\"subtitle\"],\n \"Link\": feed[\"feed\"][\"link\"],\n }\n\n # Create the bundle\n bundle_objects = list()\n\n organization = stix2.Identity(\n id=OpenCTIStix2Utils.generate_random_stix_id(\"identity\"),\n name=\"CYBERCRIME-TRACKER.NET\",\n identity_class=\"organization\",\n description=\"Tracker collecting and sharing daily updates of C2 IPs/Urls. 
http://cybercrime-tracker.net\",\n )\n bundle_objects.append(organization)\n for entry in feed[\"entries\"]:\n parsed_entry = self.parse_feed_entry(entry)\n external_reference = stix2.ExternalReference(\n source_name=\"{}\".format(self.feed_summary[\"Source\"]),\n url=parsed_entry[\"ext_link\"],\n )\n indicator_pattern = self.gen_indicator_pattern(parsed_entry)\n malware = stix2.Malware(\n id=OpenCTIStix2Utils.generate_random_stix_id(\"malware\"),\n is_family=True,\n name=parsed_entry[\"type\"],\n description=\"{} malware.\".format(parsed_entry[\"type\"]),\n )\n bundle_objects.append(malware)\n indicator = None\n if self.create_indicators:\n indicator = stix2.Indicator(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"indicator\"\n ),\n name=parsed_entry[\"url\"],\n description=\"C2 URL for: {}\".format(\n parsed_entry[\"type\"]\n ),\n labels=[\"C2 Server\"],\n pattern_type=\"stix\",\n pattern=indicator_pattern,\n valid_from=parsed_entry[\"date\"],\n created=parsed_entry[\"date\"],\n modified=parsed_entry[\"date\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n custom_properties={\n \"x_opencti_main_observable_type\": \"Url\"\n },\n )\n bundle_objects.append(indicator)\n relation = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n source_ref=indicator.id,\n target_ref=malware.id,\n relationship_type=\"indicates\",\n start_time=self._time_to_datetime(\n entry[\"published_parsed\"]\n ),\n stop_time=self._time_to_datetime(\n entry[\"published_parsed\"]\n )\n + datetime.timedelta(0, 3),\n description=\"URLs associated to: \"\n + parsed_entry[\"type\"],\n confidence=self.confidence_level,\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n created=parsed_entry[\"date\"],\n modified=parsed_entry[\"date\"],\n external_references=[external_reference],\n )\n bundle_objects.append(relation)\n if self.create_observables:\n observable_url = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"Url.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"url\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_url)\n observable_ip = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"IPv4-Addr.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"ip\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_ip)\n observable_domain = None\n if \"domain\" in parsed_entry.keys():\n observable_domain = SimpleObservable(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"x-opencti-simple-observable\"\n ),\n key=\"Domain-Name.value\",\n labels=[\"C2 Server\"],\n value=parsed_entry[\"domain\"],\n created_by_ref=organization.id,\n object_marking_refs=[tlp[\"standard_id\"]],\n external_references=[external_reference],\n )\n bundle_objects.append(observable_domain)\n\n if indicator is not None:\n relationship_1 = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_url.id,\n )\n bundle_objects.append(relationship_1)\n relationship_2 = stix2.Relationship(\n 
id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_ip.id,\n )\n bundle_objects.append(relationship_2)\n if observable_domain is not None:\n relationship_3 = stix2.Relationship(\n id=OpenCTIStix2Utils.generate_random_stix_id(\n \"relationship\"\n ),\n relationship_type=\"based-on\",\n created_by_ref=organization.id,\n source_ref=indicator.id,\n target_ref=observable_domain.id,\n )\n bundle_objects.append(relationship_3)\n\n # create stix bundle\n bundle = stix2.Bundle(objects=bundle_objects)\n # send data\n self.helper.send_stix2_bundle(\n bundle=bundle.serialize(),\n update=self.update_existing_data,\n work_id=work_id,\n )\n\n # Store the current timestamp as a last run\n message = (\n \"Connector successfully run, storing last_run as: {}\".format(\n str(timestamp)\n )\n )\n self.helper.log_info(message)\n self.helper.set_state({\"last_run\": timestamp})\n self.helper.api.work.to_processed(work_id, message)\n self.helper.log_info(\n \"Last_run stored, next run in: {} seconds.\".format(\n str(round(self.interval, 2))\n )\n )\n time.sleep(60)\n else:\n new_interval = self.interval - (timestamp - last_run)\n self.helper.log_info(\n \"Connector will not run. \\\n Next run in: {} seconds.\".format(\n str(round(new_interval, 2))\n )\n )\n time.sleep(60)\n\n except (KeyboardInterrupt, SystemExit):\n self.helper.log_info(\"Connector stop\")\n exit(0)\n except Exception as e:\n self.helper.log_error(str(e))\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n try:\n cybercrimetrackerConnector = Cybercrimetracker()\n cybercrimetrackerConnector.run()\n except Exception as e:\n print(e)\n time.sleep(10)\n exit(0)\n",
"path": "cybercrime-tracker/src/cybercrime-tracker.py"
}
] | diff --git a/cybercrime-tracker/src/cybercrime-tracker.py b/cybercrime-tracker/src/cybercrime-tracker.py
index f19ad605ae..d72f552802 100644
--- a/cybercrime-tracker/src/cybercrime-tracker.py
+++ b/cybercrime-tracker/src/cybercrime-tracker.py
@@ -56,7 +56,7 @@ def __init__(self):
config,
)
self.interval = get_config_variable(
- "CYBERCRIMETRACKER_INTERVAL",
+ "CYBERCRIME_TRACKER_INTERVAL",
["cybercrime-tracker", "interval"],
config,
isNumber=True,
|
PaddlePaddle__PaddleNLP-2877 | Support more model outputs for BERT/ERNIE/RoBERTa
<!-- Demo: https://github.com/PaddlePaddle/PaddleNLP/pull/26 -->
### PR types
<!-- One of [ New features | Bug fixes | Function optimization | Performance optimization | Breaking changes | Others ] -->
New features
### PR changes
<!-- One of [ Models | APIs | Docs | Others ] -->
APIs
### Description
<!-- Describe what this PR does -->
#2583 was reverted because of compatibility issues; this PR fixes them and applies the change again.
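For context, a hedged usage sketch of the richer outputs this change enables (the model name and exact forward signature are assumptions for illustration):
```python
import paddle
from paddlenlp.transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")

encoded = tokenizer("PaddleNLP supports richer model outputs.")
input_ids = paddle.to_tensor([encoded["input_ids"]])

# The new keyword arguments request per-layer states and a dict-like output.
outputs = model(input_ids,
                output_hidden_states=True,
                output_attentions=True,
                return_dict=True)
print(outputs.last_hidden_state.shape, len(outputs.hidden_states))
```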
| [
{
"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport paddle\nimport numpy as np\nfrom collections import OrderedDict\nfrom dataclasses import fields, dataclass\nfrom typing import Any, List, Tuple, Optional\nfrom paddle.nn.layer.transformer import _convert_attention_mask\n\nfrom .utils import adapt_stale_fwd_patch\n\n\ndef _transformer_encoder_layer_fwd(self,\n src,\n src_mask=None,\n cache=None,\n output_attentions=False):\n self.self_attn.need_weights = output_attentions\n src_mask = _convert_attention_mask(src_mask, src.dtype)\n\n residual = src\n if self.normalize_before:\n src = self.norm1(src)\n\n attn_outputs = self.self_attn(src, src, src, src_mask, cache)\n if isinstance(attn_outputs, tuple):\n src = attn_outputs[0]\n outputs = attn_outputs[1:]\n else:\n src = attn_outputs\n outputs = None\n\n src = residual + self.dropout1(src)\n if not self.normalize_before:\n src = self.norm1(src)\n\n residual = src\n if self.normalize_before:\n src = self.norm2(src)\n src = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = residual + self.dropout2(src)\n if not self.normalize_before:\n src = self.norm2(src)\n\n return src if outputs is None else (\n (src, ) + outputs[::-1]) # hidden_states, cache, attentions\n\n\ndef _transformer_encoder_fwd(self,\n src,\n src_mask=None,\n cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False):\n src_mask = _convert_attention_mask(src_mask, src.dtype)\n\n output = src\n new_caches = [] if cache is not None else None\n all_attentions = [] if output_attentions else None\n # NOTE: Also includes embeding output which is same as HF.\n all_hidden_states = [output] if output_hidden_states else None\n for i, mod in enumerate(self.layers):\n layer_outputs = mod(output,\n src_mask=src_mask,\n cache=None if cache is None else cache[i],\n output_attentions=output_attentions)\n if isinstance(layer_outputs, tuple):\n output = layer_outputs[0]\n outputs = layer_outputs[1:]\n else:\n output = layer_outputs\n outputs = None\n\n if output_hidden_states:\n all_hidden_states.append(output)\n if output_attentions:\n all_attentions.append(outputs[-1])\n if cache is not None:\n new_caches.append(outputs[1])\n\n if self.norm is not None:\n output = self.norm(output)\n\n if output_hidden_states:\n all_hidden_states[-1] = output\n\n if not return_dict:\n outputs = tuple(\n tuple(v) if isinstance(v, list) else v for v in [\n output,\n new_caches,\n all_hidden_states,\n all_attentions,\n ] if v is not None)\n if len(outputs) == 1:\n return output\n else:\n return outputs\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=output,\n past_key_values=new_caches,\n hidden_states=all_hidden_states,\n attentions=all_attentions)\n\n\n# patches of paddle.nn.Transformer to get all hidden_states and attentions\npaddle.nn.TransformerEncoderLayer.forward = 
_transformer_encoder_layer_fwd\npaddle.nn.TransformerEncoder.forward = _transformer_encoder_fwd\n\n\ndef _get_wrap_setattr(cls):\n\n def _wrap_setattr(self, name, value):\n value = adapt_stale_fwd_patch(self, name, value)\n return super(cls, self).__setattr__(name, value)\n\n return _wrap_setattr\n\n\npaddle.nn.TransformerEncoderLayer.__setattr__ = functools.wraps(\n paddle.nn.TransformerEncoderLayer.__setattr__)(_get_wrap_setattr(\n paddle.nn.TransformerEncoderLayer))\npaddle.nn.TransformerEncoder.__setattr__ = functools.wraps(\n paddle.nn.TransformerEncoder.__setattr__)(_get_wrap_setattr(\n paddle.nn.TransformerEncoder))\n\n\ndef is_tensor(x):\n if isinstance(x, paddle.Tensor):\n return True\n\n return isinstance(x, np.ndarray)\n\n\nclass ModelOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # note(guosheng): Convert list to tuple automatically, and better to\n # check if it is frozen.\n # assert not getattr(self, dataclasses._PARAMS).frozen\n for f in class_fields:\n value = getattr(self, f.name)\n if isinstance(value, list):\n setattr(self, f.name, tuple(value))\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n if not all(field.default is None for field in class_fields[1:]):\n raise ValueError(\n f\"{self.__class__.__name__} should not have more than one required field.\"\n )\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(\n getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and not is_tensor(first_field):\n if isinstance(first_field, dict):\n iterator = first_field.items()\n first_field_iterator = True\n else:\n try:\n iterator = iter(first_field)\n first_field_iterator = True\n except TypeError:\n first_field_iterator = False\n\n # if we provided an iterator as first field and the iterator is a (key, value) iterator\n # set the associated fields\n if first_field_iterator:\n for element in iterator:\n if (not isinstance(element,\n (list, tuple)) or not len(element) == 2\n or not isinstance(element[0], str)):\n break\n setattr(self, element[0], element[1])\n if element[1] is not None:\n self[element[0]] = element[1]\n elif first_field is not None:\n self[class_fields[0].name] = first_field\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\"\n )\n\n def setdefault(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\"\n )\n\n def pop(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\"\n )\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = {k: v for (k, v) in self.items()}\n return 
inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())\n\n\n@dataclass\nclass BaseModelOutputWithPastAndCrossAttentions(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n \"\"\"\n\n last_hidden_state: paddle.Tensor = None\n past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n cross_attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):\n \"\"\"\n Base 
class for model's outputs that also contains a pooling of the last hidden states.\n\n Args:\n last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token) after further processing\n through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns\n the classification token after processing through a linear layer and a tanh activation function. The linear\n layer weights are trained from the next sentence prediction (classification) objective during pretraining.\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n \"\"\"\n\n last_hidden_state: paddle.Tensor = None\n pooler_output: paddle.Tensor = None\n past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n cross_attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass SequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sentence classification models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n 
hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass TokenClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of token classification models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :\n Classification loss.\n logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass QuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of question answering models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n 
Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n start_logits: paddle.Tensor = None\n end_logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass MultipleChoiceModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of multiple choice models.\n\n Args:\n loss (`paddle.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`paddle.Tensor` of shape `(batch_size, num_choices)`):\n *num_choices* is the second dimension of the input tensors. (see *input_ids* above).\n\n Classification scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass MaskedLMOutput(ModelOutput):\n \"\"\"\n Base class for masked language models outputs.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Masked language modeling (MLM) loss.\n logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n",
"path": "paddlenlp/transformers/model_outputs.py"
}
] | [
{
"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport paddle\nimport numpy as np\nfrom collections import OrderedDict\nfrom dataclasses import fields, dataclass\nfrom typing import Any, List, Tuple, Optional\nfrom paddle.nn.layer.transformer import _convert_attention_mask\n\nfrom .utils import adapt_stale_fwd_patch\n\n\ndef _transformer_encoder_layer_fwd(self,\n src,\n src_mask=None,\n cache=None,\n output_attentions=False):\n self.self_attn.need_weights = output_attentions\n src_mask = _convert_attention_mask(src_mask, src.dtype)\n\n residual = src\n if self.normalize_before:\n src = self.norm1(src)\n\n attn_outputs = self.self_attn(src, src, src, src_mask, cache)\n if isinstance(attn_outputs, tuple):\n src = attn_outputs[0]\n outputs = attn_outputs[1:]\n else:\n src = attn_outputs\n outputs = None\n\n src = residual + self.dropout1(src)\n if not self.normalize_before:\n src = self.norm1(src)\n\n residual = src\n if self.normalize_before:\n src = self.norm2(src)\n src = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = residual + self.dropout2(src)\n if not self.normalize_before:\n src = self.norm2(src)\n\n return src if outputs is None else (\n (src, ) + outputs[::-1]) # hidden_states, cache, attentions\n\n\ndef _transformer_encoder_fwd(self,\n src,\n src_mask=None,\n cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False):\n src_mask = _convert_attention_mask(src_mask, src.dtype)\n\n output = src\n new_caches = [] if cache is not None else None\n all_attentions = [] if output_attentions else None\n # NOTE: Also includes embeding output which is same as HF.\n all_hidden_states = [output] if output_hidden_states else None\n for i, mod in enumerate(self.layers):\n layer_outputs = mod(output,\n src_mask=src_mask,\n cache=None if cache is None else cache[i],\n output_attentions=output_attentions)\n if isinstance(layer_outputs, tuple):\n output = layer_outputs[0]\n outputs = layer_outputs[1:]\n else:\n output = layer_outputs\n outputs = None\n\n if output_hidden_states:\n all_hidden_states.append(output)\n if output_attentions:\n all_attentions.append(outputs[-1])\n if cache is not None:\n new_caches.append(outputs[0])\n\n if self.norm is not None:\n output = self.norm(output)\n\n if output_hidden_states:\n all_hidden_states[-1] = output\n\n if not return_dict:\n outputs = tuple(\n tuple(v) if isinstance(v, list) else v for v in [\n output,\n new_caches,\n all_hidden_states,\n all_attentions,\n ] if v is not None)\n if len(outputs) == 1:\n return output\n else:\n return outputs\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=output,\n past_key_values=new_caches,\n hidden_states=all_hidden_states,\n attentions=all_attentions)\n\n\n# patches of paddle.nn.Transformer to get all hidden_states and attentions\npaddle.nn.TransformerEncoderLayer.forward = 
_transformer_encoder_layer_fwd\npaddle.nn.TransformerEncoder.forward = _transformer_encoder_fwd\n\n\ndef _get_wrap_setattr(cls):\n\n def _wrap_setattr(self, name, value):\n value = adapt_stale_fwd_patch(self, name, value)\n return super(cls, self).__setattr__(name, value)\n\n return _wrap_setattr\n\n\npaddle.nn.TransformerEncoderLayer.__setattr__ = functools.wraps(\n paddle.nn.TransformerEncoderLayer.__setattr__)(_get_wrap_setattr(\n paddle.nn.TransformerEncoderLayer))\npaddle.nn.TransformerEncoder.__setattr__ = functools.wraps(\n paddle.nn.TransformerEncoder.__setattr__)(_get_wrap_setattr(\n paddle.nn.TransformerEncoder))\n\n\ndef is_tensor(x):\n if isinstance(x, paddle.Tensor):\n return True\n\n return isinstance(x, np.ndarray)\n\n\nclass ModelOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # note(guosheng): Convert list to tuple automatically, and better to\n # check if it is frozen.\n # assert not getattr(self, dataclasses._PARAMS).frozen\n for f in class_fields:\n value = getattr(self, f.name)\n if isinstance(value, list):\n setattr(self, f.name, tuple(value))\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n if not all(field.default is None for field in class_fields[1:]):\n raise ValueError(\n f\"{self.__class__.__name__} should not have more than one required field.\"\n )\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(\n getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and not is_tensor(first_field):\n if isinstance(first_field, dict):\n iterator = first_field.items()\n first_field_iterator = True\n else:\n try:\n iterator = iter(first_field)\n first_field_iterator = True\n except TypeError:\n first_field_iterator = False\n\n # if we provided an iterator as first field and the iterator is a (key, value) iterator\n # set the associated fields\n if first_field_iterator:\n for element in iterator:\n if (not isinstance(element,\n (list, tuple)) or not len(element) == 2\n or not isinstance(element[0], str)):\n break\n setattr(self, element[0], element[1])\n if element[1] is not None:\n self[element[0]] = element[1]\n elif first_field is not None:\n self[class_fields[0].name] = first_field\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\"\n )\n\n def setdefault(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\"\n )\n\n def pop(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(\n f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\"\n )\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = {k: v for (k, v) in self.items()}\n return 
inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())\n\n\n@dataclass\nclass BaseModelOutputWithPastAndCrossAttentions(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n \"\"\"\n\n last_hidden_state: paddle.Tensor = None\n past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n cross_attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):\n \"\"\"\n Base 
class for model's outputs that also contains a pooling of the last hidden states.\n\n Args:\n last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token) after further processing\n through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns\n the classification token after processing through a linear layer and a tanh activation function. The linear\n layer weights are trained from the next sentence prediction (classification) objective during pretraining.\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n \"\"\"\n\n last_hidden_state: paddle.Tensor = None\n pooler_output: paddle.Tensor = None\n past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n cross_attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass SequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sentence classification models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n 
hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass TokenClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of token classification models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :\n Classification loss.\n logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass QuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of question answering models.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n 
Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n start_logits: paddle.Tensor = None\n end_logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass MultipleChoiceModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of multiple choice models.\n\n Args:\n loss (`paddle.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`paddle.Tensor` of shape `(batch_size, num_choices)`):\n *num_choices* is the second dimension of the input tensors. (see *input_ids* above).\n\n Classification scores (before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n\n\n@dataclass\nclass MaskedLMOutput(ModelOutput):\n \"\"\"\n Base class for masked language models outputs.\n\n Args:\n loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Masked language modeling (MLM) loss.\n logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[paddle.Tensor] = None\n logits: paddle.Tensor = None\n hidden_states: Optional[Tuple[paddle.Tensor]] = None\n attentions: Optional[Tuple[paddle.Tensor]] = None\n",
"path": "paddlenlp/transformers/model_outputs.py"
}
] | diff --git a/paddlenlp/transformers/model_outputs.py b/paddlenlp/transformers/model_outputs.py
index c74f431e8e1a..8882d647ec57 100644
--- a/paddlenlp/transformers/model_outputs.py
+++ b/paddlenlp/transformers/model_outputs.py
@@ -91,7 +91,7 @@ def _transformer_encoder_fwd(self,
if output_attentions:
all_attentions.append(outputs[-1])
if cache is not None:
- new_caches.append(outputs[1])
+ new_caches.append(outputs[0])
if self.norm is not None:
output = self.norm(output)
|
django-cms__django-cms-2886 | cms.utils.i18n.get_fallback_languages may fail if LANGUAGES has more languages than CMS_LANGUAGES
Reported via IRC.
Use case: show the admin interface in English (for users who have configured their browser to English), while CMS pages should only be available in German or French.
In the use case above, settings might be something like:
``` python
LANGUAGES = [
('en', 'English'),
('de', 'Deutsch'),
('fr', 'French'),
]
CMS_LANGUAGES = {
1: [
{
'code': 'de',
'name': gettext('Deutsch'),
'public': True,
},
{
'code': 'fr',
'name': gettext('French'),
'fallbacks': ['de',],
'public': False,
},
],
'default': {
'fallbacks': ['de',],
'redirect_on_fallback':True,
'public': False,
'hide_untranslated': False,
}
}
```
`'en'` is in `LANGUAGES` but not in `CMS_LANGUAGES`.
Now, if `cms.utils.i18n.get_fallback_languages` is called with `'en'` as its argument, it raises a `LanguageError`, because `'en'` is not available to the CMS. This happens, for example, when you add a page in the admin while the admin UI is in English: the add view logs the page change in the currently active language rather than in the language of the page, which triggers a call to `get_fallback_languages` with English as the argument.
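A minimal sketch of a defensive variant, assuming the helpers from `cms.utils.i18n` shown in this entry; instead of letting the `LanguageError` propagate, it falls back to the first language configured for the site. This mirrors the fix applied in the diff below; the wrapper name is hypothetical:

``` python
# Illustrative sketch only; get_language_object, get_languages and LanguageError
# are the real helpers shown in this entry, the wrapper name is hypothetical.
from cms.exceptions import LanguageError
from cms.utils.i18n import get_language_object, get_languages


def get_fallback_languages_tolerant(language, site_id=None):
    """Return the fallback list for ``language`` without failing on codes
    that exist in LANGUAGES but not in CMS_LANGUAGES (e.g. 'en' above)."""
    try:
        language_obj = get_language_object(language, site_id)
    except LanguageError:
        # The requested code is unknown to the CMS: use the first language
        # configured for this site instead of raising.
        language_obj = get_languages(site_id)[0]
    return language_obj.get('fallbacks', [])
```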
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom contextlib import contextmanager\n\nfrom django.core.urlresolvers import get_resolver, LocaleRegexURLResolver\nfrom django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.exceptions import LanguageError\nfrom cms.utils.conf import get_cms_setting, get_site_id\n\n\n@contextmanager\ndef force_language(new_lang):\n old_lang = get_current_language()\n if old_lang != new_lang:\n translation.activate(new_lang)\n yield\n translation.activate(old_lang)\n\n\ndef get_languages(site_id=None):\n site_id = get_site_id(site_id)\n result = get_cms_setting('LANGUAGES').get(site_id)\n if not result:\n result = []\n defaults = get_cms_setting('LANGUAGES').get('default', {})\n for code, name in settings.LANGUAGES:\n lang = {'code': code, 'name': _(name)}\n lang.update(defaults)\n result.append(lang)\n get_cms_setting('LANGUAGES')[site_id] = result\n return result\n\n\ndef get_language_code(language_code):\n \"\"\"\n Returns language code while making sure it's in LANGUAGES\n \"\"\"\n if not language_code:\n return None\n languages = get_language_list()\n if language_code in languages: # direct hit\n return language_code\n for lang in languages:\n if language_code.split('-')[0] == lang: # base language hit\n return lang\n if lang.split('-')[0] == language_code: # base language hit\n return lang\n return language_code\n\n\ndef get_current_language():\n \"\"\"\n Returns the currently active language\n\n It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.\n Overcomes this issue: https://code.djangoproject.com/ticket/9340\n \"\"\"\n language_code = translation.get_language()\n return get_language_code(language_code)\n\n\ndef get_language_list(site_id=None):\n \"\"\"\n :return: returns a list of iso2codes for this site\n \"\"\"\n if not settings.USE_I18N:\n return [settings.LANGUAGE_CODE]\n languages = []\n for language in get_languages(site_id):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_tuple(site_id=None):\n \"\"\"\n :return: returns an list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site\n \"\"\"\n languages = []\n for language in get_languages(site_id):\n languages.append((language['code'], language['name']))\n return languages\n\n\ndef get_language_dict(site_id=None):\n \"\"\"\n :return: returns an dict of cms languages\n \"\"\"\n languages = {}\n for language in get_languages(site_id):\n languages[language['code']] = language['name']\n return languages\n\n\ndef get_public_languages(site_id=None):\n \"\"\"\n :return: list of iso2codes of public languages for this site\n \"\"\"\n languages = []\n for language in get_language_objects(site_id):\n if language.get(\"public\", True):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_object(language_code, site_id=None):\n \"\"\"\n :param language_code: RFC5646 language code\n :return: the language object filled up by defaults\n \"\"\"\n for language in get_languages(site_id):\n if language['code'] == get_language_code(language_code):\n return language\n raise LanguageError('Language not found: %s' % language_code)\n\n\ndef get_language_objects(site_id=None):\n \"\"\"\n returns list of all language objects filled up by default values\n \"\"\"\n return list(get_languages(site_id))\n\n\ndef get_default_language(language_code=None, site_id=None):\n \"\"\"\n Returns default language depending on 
settings.LANGUAGE_CODE merged with\n best match from get_cms_setting('LANGUAGES')\n\n Returns: language_code\n \"\"\"\n\n if not language_code:\n language_code = get_language_code(settings.LANGUAGE_CODE)\n\n languages = get_language_list(site_id)\n\n # first try if there is an exact language\n if language_code in languages:\n return language_code\n\n # otherwise split the language code if possible, so iso3\n language_code = language_code.split(\"-\")[0]\n\n if not language_code in languages:\n return settings.LANGUAGE_CODE\n\n return language_code\n\n\ndef get_fallback_languages(language, site_id=None):\n \"\"\"\n returns a list of fallback languages for the given language\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('fallbacks', [])\n\n\ndef get_redirect_on_fallback(language, site_id=None):\n \"\"\"\n returns if you should redirect on language fallback\n :param language:\n :param site_id:\n :return: Boolean\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('redirect_on_fallback', True)\n\n\ndef hide_untranslated(language, site_id=None):\n \"\"\"\n Should untranslated pages in this language be hidden?\n :param language:\n :param site_id:\n :return: A Boolean\n \"\"\"\n obj = get_language_object(language, site_id)\n return obj.get('hide_untranslated', True)\n\n\ndef is_language_prefix_patterns_used():\n \"\"\"\n Returns `True` if the `LocaleRegexURLResolver` is used\n at root level of the urlpatterns, else it returns `False`.\n \"\"\"\n for url_pattern in get_resolver(None).url_patterns:\n if isinstance(url_pattern, LocaleRegexURLResolver):\n return True\n return False\n",
"path": "cms/utils/i18n.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom contextlib import contextmanager\n\nfrom django.core.urlresolvers import get_resolver, LocaleRegexURLResolver\nfrom django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.exceptions import LanguageError\nfrom cms.utils.conf import get_cms_setting, get_site_id\n\n\n@contextmanager\ndef force_language(new_lang):\n old_lang = get_current_language()\n if old_lang != new_lang:\n translation.activate(new_lang)\n yield\n translation.activate(old_lang)\n\n\ndef get_languages(site_id=None):\n site_id = get_site_id(site_id)\n result = get_cms_setting('LANGUAGES').get(site_id)\n if not result:\n result = []\n defaults = get_cms_setting('LANGUAGES').get('default', {})\n for code, name in settings.LANGUAGES:\n lang = {'code': code, 'name': _(name)}\n lang.update(defaults)\n result.append(lang)\n get_cms_setting('LANGUAGES')[site_id] = result\n return result\n\n\ndef get_language_code(language_code):\n \"\"\"\n Returns language code while making sure it's in LANGUAGES\n \"\"\"\n if not language_code:\n return None\n languages = get_language_list()\n if language_code in languages: # direct hit\n return language_code\n for lang in languages:\n if language_code.split('-')[0] == lang: # base language hit\n return lang\n if lang.split('-')[0] == language_code: # base language hit\n return lang\n return language_code\n\n\ndef get_current_language():\n \"\"\"\n Returns the currently active language\n\n It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.\n Overcomes this issue: https://code.djangoproject.com/ticket/9340\n \"\"\"\n language_code = translation.get_language()\n return get_language_code(language_code)\n\n\ndef get_language_list(site_id=None):\n \"\"\"\n :return: returns a list of iso2codes for this site\n \"\"\"\n if not settings.USE_I18N:\n return [settings.LANGUAGE_CODE]\n languages = []\n for language in get_languages(site_id):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_tuple(site_id=None):\n \"\"\"\n :return: returns an list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site\n \"\"\"\n languages = []\n for language in get_languages(site_id):\n languages.append((language['code'], language['name']))\n return languages\n\n\ndef get_language_dict(site_id=None):\n \"\"\"\n :return: returns an dict of cms languages\n \"\"\"\n languages = {}\n for language in get_languages(site_id):\n languages[language['code']] = language['name']\n return languages\n\n\ndef get_public_languages(site_id=None):\n \"\"\"\n :return: list of iso2codes of public languages for this site\n \"\"\"\n languages = []\n for language in get_language_objects(site_id):\n if language.get(\"public\", True):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_object(language_code, site_id=None):\n \"\"\"\n :param language_code: RFC5646 language code\n :return: the language object filled up by defaults\n \"\"\"\n for language in get_languages(site_id):\n if language['code'] == get_language_code(language_code):\n return language\n raise LanguageError('Language not found: %s' % language_code)\n\n\ndef get_language_objects(site_id=None):\n \"\"\"\n returns list of all language objects filled up by default values\n \"\"\"\n return list(get_languages(site_id))\n\n\ndef get_default_language(language_code=None, site_id=None):\n \"\"\"\n Returns default language depending on 
settings.LANGUAGE_CODE merged with\n best match from get_cms_setting('LANGUAGES')\n\n Returns: language_code\n \"\"\"\n\n if not language_code:\n language_code = get_language_code(settings.LANGUAGE_CODE)\n\n languages = get_language_list(site_id)\n\n # first try if there is an exact language\n if language_code in languages:\n return language_code\n\n # otherwise split the language code if possible, so iso3\n language_code = language_code.split(\"-\")[0]\n\n if not language_code in languages:\n return settings.LANGUAGE_CODE\n\n return language_code\n\n\ndef get_fallback_languages(language, site_id=None):\n \"\"\"\n returns a list of fallback languages for the given language\n \"\"\"\n try:\n language = get_language_object(language, site_id)\n except LanguageError:\n language = get_languages(site_id)[0]\n return language.get('fallbacks', [])\n\n\ndef get_redirect_on_fallback(language, site_id=None):\n \"\"\"\n returns if you should redirect on language fallback\n :param language:\n :param site_id:\n :return: Boolean\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('redirect_on_fallback', True)\n\n\ndef hide_untranslated(language, site_id=None):\n \"\"\"\n Should untranslated pages in this language be hidden?\n :param language:\n :param site_id:\n :return: A Boolean\n \"\"\"\n obj = get_language_object(language, site_id)\n return obj.get('hide_untranslated', True)\n\n\ndef is_language_prefix_patterns_used():\n \"\"\"\n Returns `True` if the `LocaleRegexURLResolver` is used\n at root level of the urlpatterns, else it returns `False`.\n \"\"\"\n for url_pattern in get_resolver(None).url_patterns:\n if isinstance(url_pattern, LocaleRegexURLResolver):\n return True\n return False\n",
"path": "cms/utils/i18n.py"
}
] | diff --git a/cms/tests/i18n.py b/cms/tests/i18n.py
index de84f8f698d..3e7231cb34c 100644
--- a/cms/tests/i18n.py
+++ b/cms/tests/i18n.py
@@ -3,6 +3,7 @@
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils import i18n
from django.utils.importlib import import_module
+from cms.utils.i18n import get_fallback_languages
class TestLanguages(SettingsOverrideTestCase):
@@ -275,6 +276,39 @@ def test_get_languages_undefined_site(self):
self.assertEqual(lang['hide_untranslated'], False)
+class TestLanguagesNotInCMSLanguages(SettingsOverrideTestCase):
+ settings_overrides = {
+ 'LANGUAGE_CODE': 'en',
+ 'LANGUAGES': [
+ ('en', 'English'),
+ ('de', 'German'),
+ ('fr', 'French')
+ ],
+ 'CMS_LANGUAGES': {
+ 1: [
+ {
+ 'code': 'de',
+ 'name': 'German',
+ 'public': True,
+ },
+ {
+ 'code': 'fr',
+ 'name': 'French',
+ 'public': True
+ }
+ ],
+ 'default': {
+ 'fallbacks': ['de', 'fr'],
+ }
+ },
+ 'SITE_ID': 1,
+ }
+
+ def test_get_fallback_languages(self):
+ languages = get_fallback_languages('en', 1)
+ self.assertEqual(languages, ['de', 'fr'])
+
+
class TestLanguageFallbacks(SettingsOverrideTestCase):
settings_overrides = {
diff --git a/cms/utils/i18n.py b/cms/utils/i18n.py
index 08a24f50a11..1c4a0cde0a4 100644
--- a/cms/utils/i18n.py
+++ b/cms/utils/i18n.py
@@ -152,7 +152,10 @@ def get_fallback_languages(language, site_id=None):
"""
returns a list of fallback languages for the given language
"""
- language = get_language_object(language, site_id)
+ try:
+ language = get_language_object(language, site_id)
+ except LanguageError:
+ language = get_languages(site_id)[0]
return language.get('fallbacks', [])
|
searx__searx-2167 | Results links open in the same tab
How to set searx to open links in a new tab?
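One common way to support this is a boolean user preference that result templates check in order to emit `target="_blank"` on result links. Below is a minimal sketch built on the `MapSetting` class from the preferences module shown in this entry; the preference name, import path and template snippet are assumptions, not necessarily how searx implements it:

``` python
# Sketch of a boolean preference built on the MapSetting class from this entry.
# The name 'open_in_new_tab' and how templates consume it are assumptions.
from searx.preferences import MapSetting

open_in_new_tab = MapSetting(
    False,                      # default: keep the current behaviour (same tab)
    map={
        '0': False,
        '1': True,
        'False': False,
        'True': True,
    },
)

# Parsing a submitted form/cookie value and reading it back:
open_in_new_tab.parse('1')
assert open_in_new_tab.get_value() is True

# A result template could then render something like:
# <a href="{{ result.url }}"{% if results_on_new_tab %} target="_blank" rel="noopener noreferrer"{% endif %}>
```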
| [
{
"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Searx preferences implementation.\n\"\"\"\n\n# pylint: disable=useless-object-inheritance\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom zlib import compress, decompress\nfrom sys import version\n\nfrom searx import settings, autocomplete\nfrom searx.languages import language_codes as languages\nfrom searx.utils import match_language\nfrom searx.url_utils import parse_qs, urlencode\n\nif version[0] == '3':\n # pylint: disable=invalid-name\n unicode = str\n\n\nCOOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years\nLANGUAGE_CODES = [l[0] for l in languages]\nLANGUAGE_CODES.append('all')\nDISABLED = 0\nENABLED = 1\nDOI_RESOLVERS = list(settings['doi_resolvers'])\n\n\nclass MissingArgumentException(Exception):\n \"\"\"Exption from ``cls._post_init`` when a argument is missed.\n \"\"\"\n\n\nclass ValidationException(Exception):\n\n \"\"\"Exption from ``cls._post_init`` when configuration value is invalid.\n \"\"\"\n\n\nclass Setting(object):\n \"\"\"Base class of user settings\"\"\"\n\n def __init__(self, default_value, **kwargs):\n super().__init__()\n self.value = default_value\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n self._post_init()\n\n def _post_init(self):\n pass\n\n def parse(self, data):\n \"\"\"Parse ``data`` and store the result at ``self.value``\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n self.value = data\n\n def get_value(self):\n \"\"\"Returns the value of the setting\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n return self.value\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n\n If needed, its overwritten in the inheritance.\"\"\"\n resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)\n\n\nclass StringSetting(Setting):\n \"\"\"Setting of plain string values\"\"\"\n\n\nclass EnumStringSetting(Setting):\n \"\"\"Setting of a value which can only come from the given choices\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selection(self.value)\n\n def _validate_selection(self, selection):\n if selection not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n self._validate_selection(data)\n self.value = data\n\n\nclass MultipleChoiceSetting(EnumStringSetting):\n \"\"\"Setting of values which can only come from the given choices\"\"\"\n\n def _validate_selections(self, selections):\n for item in selections:\n if item not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selections))\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selections(self.value)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.value = []\n return\n\n elements = data.split(',')\n self._validate_selections(elements)\n self.value = elements\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n self.value = []\n for choice in data:\n if choice in self.choices and choice not in self.value: # pylint: disable=no-member\n self.value.append(choice)\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the 
HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)\n\n\nclass SetSetting(Setting):\n \"\"\"Setting of values of type ``set`` (comma separated string) \"\"\"\n def _post_init(self):\n if not hasattr(self, 'values'):\n self.values = set()\n\n def get_value(self):\n \"\"\"Returns a string with comma separated values.\n \"\"\"\n return ','.join(self.values)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.values = set() # pylint: disable=attribute-defined-outside-init\n return\n\n elements = data.split(',')\n for element in elements:\n self.values.add(element)\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n elements = data.split(',')\n self.values = set(elements) # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.values), max_age=COOKIE_MAX_AGE)\n\n\nclass SearchLanguageSetting(EnumStringSetting):\n \"\"\"Available choices may change, so user's value may not be in choices anymore\"\"\"\n\n def _validate_selection(self, selection):\n if selection != \"\" and not match_language(\n # pylint: disable=no-member\n selection, self.choices, fallback=None):\n raise ValidationException('Invalid language code: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data not in self.choices and data != self.value: # pylint: disable=no-member\n # hack to give some backwards compatibility with old language cookies\n data = str(data).replace('_', '-')\n lang = data.split('-')[0]\n # pylint: disable=no-member\n if data in self.choices:\n pass\n elif lang in self.choices:\n data = lang\n else:\n data = self.value\n self.value = data\n\n\nclass MapSetting(Setting):\n \"\"\"Setting of a value that has to be translated in order to be storable\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'map'):\n raise MissingArgumentException('missing argument: map')\n if self.value not in self.map.values(): # pylint: disable=no-member\n raise ValidationException('Invalid default value')\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n # pylint: disable=no-member\n if data not in self.map:\n raise ValidationException('Invalid choice: {0}'.format(data))\n self.value = self.map[data]\n self.key = data # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n if hasattr(self, 'key'):\n resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)\n\n\nclass SwitchableSetting(Setting):\n \"\"\" Base class for settings that can be turned on && off\"\"\"\n\n def _post_init(self):\n self.disabled = set()\n self.enabled = set()\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('missing argument: choices')\n\n def transform_form_items(self, items): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return items\n\n def transform_values(self, values): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return values\n\n def parse_cookie(self, data): # pylint: disable=missing-function-docstring\n # pylint: disable=attribute-defined-outside-init\n if data[DISABLED] != '':\n self.disabled = set(data[DISABLED].split(','))\n if data[ENABLED] 
!= '':\n self.enabled = set(data[ENABLED].split(','))\n\n def parse_form(self, items): # pylint: disable=missing-function-docstring\n items = self.transform_form_items(items)\n self.disabled = set() # pylint: disable=attribute-defined-outside-init\n self.enabled = set() # pylint: disable=attribute-defined-outside-init\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on']:\n if choice['id'] in items:\n self.disabled.add(choice['id'])\n else:\n if choice['id'] not in items:\n self.enabled.add(choice['id'])\n\n def save(self, resp): # pylint: disable=arguments-differ\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)\n resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)\n\n def get_disabled(self): # pylint: disable=missing-function-docstring\n disabled = self.disabled\n for choice in self.choices: # pylint: disable=no-member\n if not choice['default_on'] and choice['id'] not in self.enabled:\n disabled.add(choice['id'])\n return self.transform_values(disabled)\n\n def get_enabled(self): # pylint: disable=missing-function-docstring\n enabled = self.enabled\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on'] and choice['id'] not in self.disabled:\n enabled.add(choice['id'])\n return self.transform_values(enabled)\n\n\nclass EnginesSetting(SwitchableSetting):\n \"\"\"Engine settings\"\"\"\n\n def _post_init(self):\n super()._post_init()\n transformed_choices = []\n for engine_name, engine in self.choices.items(): # pylint: disable=no-member,access-member-before-definition\n for category in engine.categories:\n transformed_choice = dict()\n transformed_choice['default_on'] = not engine.disabled\n transformed_choice['id'] = '{}__{}'.format(engine_name, category)\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]\n\n def transform_values(self, values):\n if len(values) == 1 and next(iter(values)) == '':\n return list()\n transformed_values = []\n for value in values:\n engine, category = value.split('__')\n transformed_values.append((engine, category))\n return transformed_values\n\n\nclass PluginsSetting(SwitchableSetting):\n \"\"\"Plugin settings\"\"\"\n\n def _post_init(self):\n super()._post_init()\n transformed_choices = []\n for plugin in self.choices: # pylint: disable=access-member-before-definition\n transformed_choice = dict()\n transformed_choice['default_on'] = plugin.default_on\n transformed_choice['id'] = plugin.id\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('plugin_'):] for item in items]\n\n\nclass Preferences(object):\n \"\"\"Validates and saves preferences to cookies\"\"\"\n\n def __init__(self, themes, categories, engines, plugins):\n super().__init__()\n\n self.key_value_settings = {\n 'categories': MultipleChoiceSetting(\n ['general'], choices=categories + ['none']\n ),\n 'language': SearchLanguageSetting(\n settings['search'].get('default_lang', ''),\n choices=list(LANGUAGE_CODES) + ['']\n ),\n 'locale': EnumStringSetting(\n settings['ui'].get('default_locale', ''),\n choices=list(settings['locales'].keys()) + ['']\n ),\n 'autocomplete': EnumStringSetting(\n settings['search'].get('autocomplete', 
''),\n choices=list(autocomplete.backends.keys()) + ['']\n ),\n 'image_proxy': MapSetting(\n settings['server'].get('image_proxy', False),\n map={\n '': settings['server'].get('image_proxy', 0),\n '0': False,\n '1': True,\n 'True': True,\n 'False': False\n }\n ),\n 'method': EnumStringSetting(\n settings['server'].get('method', 'POST'),\n choices=('GET', 'POST')\n ),\n 'safesearch': MapSetting(\n settings['search'].get('safe_search', 0),\n map={\n '0': 0,\n '1': 1,\n '2': 2\n }\n ),\n 'theme': EnumStringSetting(\n settings['ui'].get('default_theme', 'oscar'),\n choices=themes\n ),\n 'results_on_new_tab': MapSetting(\n False,\n map={\n '0': False,\n '1': True,\n 'False': False,\n 'True': True\n }\n ),\n 'doi_resolver': MultipleChoiceSetting(\n ['oadoi.org'], choices=DOI_RESOLVERS\n ),\n 'oscar-style': EnumStringSetting(\n settings['ui'].get('theme_args', {}).get('oscar_style', 'logicodev'),\n choices=['', 'logicodev', 'logicodev-dark', 'pointhi']),\n }\n\n self.engines = EnginesSetting('engines', choices=engines)\n self.plugins = PluginsSetting('plugins', choices=plugins)\n self.tokens = SetSetting('tokens')\n self.unknown_params = {}\n\n def get_as_url_params(self):\n \"\"\"Return preferences as URL parameters\"\"\"\n settings_kv = {}\n for k, v in self.key_value_settings.items():\n if isinstance(v, MultipleChoiceSetting):\n settings_kv[k] = ','.join(v.get_value())\n else:\n settings_kv[k] = v.get_value()\n\n settings_kv['disabled_engines'] = ','.join(self.engines.disabled)\n settings_kv['enabled_engines'] = ','.join(self.engines.enabled)\n\n settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)\n settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)\n\n settings_kv['tokens'] = ','.join(self.tokens.values)\n\n return urlsafe_b64encode(compress(urlencode(settings_kv).encode('utf-8'))).decode('utf-8')\n\n def parse_encoded_data(self, input_data):\n \"\"\"parse (base64) preferences from request (``flask.request.form['preferences']``)\"\"\"\n decoded_data = decompress(urlsafe_b64decode(input_data.encode('utf-8')))\n dict_data = {}\n for x, y in parse_qs(decoded_data).items():\n dict_data[x.decode('utf8')] = y[0].decode('utf8')\n self.parse_dict(dict_data)\n\n def parse_dict(self, input_data):\n \"\"\"parse preferences from request (``flask.request.form``)\"\"\"\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name == 'disabled_engines':\n self.engines.parse_cookie((input_data.get('disabled_engines', ''),\n input_data.get('enabled_engines', '')))\n elif user_setting_name == 'disabled_plugins':\n self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),\n input_data.get('enabled_plugins', '')))\n elif user_setting_name == 'tokens':\n self.tokens.parse(user_setting)\n elif not any(user_setting_name.startswith(x) for x in [\n 'enabled_',\n 'disabled_',\n 'engine_',\n 'category_',\n 'plugin_']):\n self.unknown_params[user_setting_name] = user_setting\n\n def parse_form(self, input_data):\n \"\"\"Parse formular (``<input>``) data from a ``flask.request.form``\"\"\"\n disabled_engines = []\n enabled_categories = []\n disabled_plugins = []\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name.startswith('engine_'):\n disabled_engines.append(user_setting_name)\n elif 
user_setting_name.startswith('category_'):\n enabled_categories.append(user_setting_name[len('category_'):])\n elif user_setting_name.startswith('plugin_'):\n disabled_plugins.append(user_setting_name)\n elif user_setting_name == 'tokens':\n self.tokens.parse_form(user_setting)\n else:\n self.unknown_params[user_setting_name] = user_setting\n self.key_value_settings['categories'].parse_form(enabled_categories)\n self.engines.parse_form(disabled_engines)\n self.plugins.parse_form(disabled_plugins)\n\n # cannot be used in case of engines or plugins\n def get_value(self, user_setting_name):\n \"\"\"Returns the value for ``user_setting_name``\n \"\"\"\n ret_val = None\n if user_setting_name in self.key_value_settings:\n ret_val = self.key_value_settings[user_setting_name].get_value()\n if user_setting_name in self.unknown_params:\n ret_val = self.unknown_params[user_setting_name]\n return ret_val\n\n def save(self, resp):\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n for user_setting_name, user_setting in self.key_value_settings.items():\n user_setting.save(user_setting_name, resp)\n self.engines.save(resp)\n self.plugins.save(resp)\n self.tokens.save('tokens', resp)\n for k, v in self.unknown_params.items():\n resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)\n return resp\n\n def validate_token(self, engine): # pylint: disable=missing-function-docstring\n valid = True\n if hasattr(engine, 'tokens') and engine.tokens:\n valid = False\n for token in self.tokens.values:\n if token in engine.tokens:\n valid = True\n break\n\n return valid\n",
"path": "searx/preferences.py"
}
] | [
{
"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Searx preferences implementation.\n\"\"\"\n\n# pylint: disable=useless-object-inheritance\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom zlib import compress, decompress\nfrom sys import version\n\nfrom searx import settings, autocomplete\nfrom searx.languages import language_codes as languages\nfrom searx.utils import match_language\nfrom searx.url_utils import parse_qs, urlencode\n\nif version[0] == '3':\n # pylint: disable=invalid-name\n unicode = str\n\n\nCOOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years\nLANGUAGE_CODES = [l[0] for l in languages]\nLANGUAGE_CODES.append('all')\nDISABLED = 0\nENABLED = 1\nDOI_RESOLVERS = list(settings['doi_resolvers'])\n\n\nclass MissingArgumentException(Exception):\n \"\"\"Exption from ``cls._post_init`` when a argument is missed.\n \"\"\"\n\n\nclass ValidationException(Exception):\n\n \"\"\"Exption from ``cls._post_init`` when configuration value is invalid.\n \"\"\"\n\n\nclass Setting(object):\n \"\"\"Base class of user settings\"\"\"\n\n def __init__(self, default_value, **kwargs):\n super().__init__()\n self.value = default_value\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n self._post_init()\n\n def _post_init(self):\n pass\n\n def parse(self, data):\n \"\"\"Parse ``data`` and store the result at ``self.value``\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n self.value = data\n\n def get_value(self):\n \"\"\"Returns the value of the setting\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n return self.value\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n\n If needed, its overwritten in the inheritance.\"\"\"\n resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)\n\n\nclass StringSetting(Setting):\n \"\"\"Setting of plain string values\"\"\"\n\n\nclass EnumStringSetting(Setting):\n \"\"\"Setting of a value which can only come from the given choices\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selection(self.value)\n\n def _validate_selection(self, selection):\n if selection not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n self._validate_selection(data)\n self.value = data\n\n\nclass MultipleChoiceSetting(EnumStringSetting):\n \"\"\"Setting of values which can only come from the given choices\"\"\"\n\n def _validate_selections(self, selections):\n for item in selections:\n if item not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selections))\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selections(self.value)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.value = []\n return\n\n elements = data.split(',')\n self._validate_selections(elements)\n self.value = elements\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n self.value = []\n for choice in data:\n if choice in self.choices and choice not in self.value: # pylint: disable=no-member\n self.value.append(choice)\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the 
HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)\n\n\nclass SetSetting(Setting):\n \"\"\"Setting of values of type ``set`` (comma separated string) \"\"\"\n def _post_init(self):\n if not hasattr(self, 'values'):\n self.values = set()\n\n def get_value(self):\n \"\"\"Returns a string with comma separated values.\n \"\"\"\n return ','.join(self.values)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.values = set() # pylint: disable=attribute-defined-outside-init\n return\n\n elements = data.split(',')\n for element in elements:\n self.values.add(element)\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n elements = data.split(',')\n self.values = set(elements) # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.values), max_age=COOKIE_MAX_AGE)\n\n\nclass SearchLanguageSetting(EnumStringSetting):\n \"\"\"Available choices may change, so user's value may not be in choices anymore\"\"\"\n\n def _validate_selection(self, selection):\n if selection != \"\" and not match_language(\n # pylint: disable=no-member\n selection, self.choices, fallback=None):\n raise ValidationException('Invalid language code: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data not in self.choices and data != self.value: # pylint: disable=no-member\n # hack to give some backwards compatibility with old language cookies\n data = str(data).replace('_', '-')\n lang = data.split('-')[0]\n # pylint: disable=no-member\n if data in self.choices:\n pass\n elif lang in self.choices:\n data = lang\n else:\n data = self.value\n self.value = data\n\n\nclass MapSetting(Setting):\n \"\"\"Setting of a value that has to be translated in order to be storable\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'map'):\n raise MissingArgumentException('missing argument: map')\n if self.value not in self.map.values(): # pylint: disable=no-member\n raise ValidationException('Invalid default value')\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n # pylint: disable=no-member\n if data not in self.map:\n raise ValidationException('Invalid choice: {0}'.format(data))\n self.value = self.map[data]\n self.key = data # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n if hasattr(self, 'key'):\n resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)\n\n\nclass SwitchableSetting(Setting):\n \"\"\" Base class for settings that can be turned on && off\"\"\"\n\n def _post_init(self):\n self.disabled = set()\n self.enabled = set()\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('missing argument: choices')\n\n def transform_form_items(self, items): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return items\n\n def transform_values(self, values): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return values\n\n def parse_cookie(self, data): # pylint: disable=missing-function-docstring\n # pylint: disable=attribute-defined-outside-init\n if data[DISABLED] != '':\n self.disabled = set(data[DISABLED].split(','))\n if data[ENABLED] 
!= '':\n self.enabled = set(data[ENABLED].split(','))\n\n def parse_form(self, items): # pylint: disable=missing-function-docstring\n items = self.transform_form_items(items)\n self.disabled = set() # pylint: disable=attribute-defined-outside-init\n self.enabled = set() # pylint: disable=attribute-defined-outside-init\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on']:\n if choice['id'] in items:\n self.disabled.add(choice['id'])\n else:\n if choice['id'] not in items:\n self.enabled.add(choice['id'])\n\n def save(self, resp): # pylint: disable=arguments-differ\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)\n resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)\n\n def get_disabled(self): # pylint: disable=missing-function-docstring\n disabled = self.disabled\n for choice in self.choices: # pylint: disable=no-member\n if not choice['default_on'] and choice['id'] not in self.enabled:\n disabled.add(choice['id'])\n return self.transform_values(disabled)\n\n def get_enabled(self): # pylint: disable=missing-function-docstring\n enabled = self.enabled\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on'] and choice['id'] not in self.disabled:\n enabled.add(choice['id'])\n return self.transform_values(enabled)\n\n\nclass EnginesSetting(SwitchableSetting):\n \"\"\"Engine settings\"\"\"\n\n def _post_init(self):\n super()._post_init()\n transformed_choices = []\n for engine_name, engine in self.choices.items(): # pylint: disable=no-member,access-member-before-definition\n for category in engine.categories:\n transformed_choice = dict()\n transformed_choice['default_on'] = not engine.disabled\n transformed_choice['id'] = '{}__{}'.format(engine_name, category)\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]\n\n def transform_values(self, values):\n if len(values) == 1 and next(iter(values)) == '':\n return list()\n transformed_values = []\n for value in values:\n engine, category = value.split('__')\n transformed_values.append((engine, category))\n return transformed_values\n\n\nclass PluginsSetting(SwitchableSetting):\n \"\"\"Plugin settings\"\"\"\n\n def _post_init(self):\n super()._post_init()\n transformed_choices = []\n for plugin in self.choices: # pylint: disable=access-member-before-definition\n transformed_choice = dict()\n transformed_choice['default_on'] = plugin.default_on\n transformed_choice['id'] = plugin.id\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('plugin_'):] for item in items]\n\n\nclass Preferences(object):\n \"\"\"Validates and saves preferences to cookies\"\"\"\n\n def __init__(self, themes, categories, engines, plugins):\n super().__init__()\n\n self.key_value_settings = {\n 'categories': MultipleChoiceSetting(\n ['general'], choices=categories + ['none']\n ),\n 'language': SearchLanguageSetting(\n settings['search'].get('default_lang', ''),\n choices=list(LANGUAGE_CODES) + ['']\n ),\n 'locale': EnumStringSetting(\n settings['ui'].get('default_locale', ''),\n choices=list(settings['locales'].keys()) + ['']\n ),\n 'autocomplete': EnumStringSetting(\n settings['search'].get('autocomplete', 
''),\n choices=list(autocomplete.backends.keys()) + ['']\n ),\n 'image_proxy': MapSetting(\n settings['server'].get('image_proxy', False),\n map={\n '': settings['server'].get('image_proxy', 0),\n '0': False,\n '1': True,\n 'True': True,\n 'False': False\n }\n ),\n 'method': EnumStringSetting(\n settings['server'].get('method', 'POST'),\n choices=('GET', 'POST')\n ),\n 'safesearch': MapSetting(\n settings['search'].get('safe_search', 0),\n map={\n '0': 0,\n '1': 1,\n '2': 2\n }\n ),\n 'theme': EnumStringSetting(\n settings['ui'].get('default_theme', 'oscar'),\n choices=themes\n ),\n 'results_on_new_tab': MapSetting(\n settings['ui'].get('results_on_new_tab', False),\n map={\n '0': False,\n '1': True,\n 'False': False,\n 'True': True\n }\n ),\n 'doi_resolver': MultipleChoiceSetting(\n ['oadoi.org'], choices=DOI_RESOLVERS\n ),\n 'oscar-style': EnumStringSetting(\n settings['ui'].get('theme_args', {}).get('oscar_style', 'logicodev'),\n choices=['', 'logicodev', 'logicodev-dark', 'pointhi']),\n }\n\n self.engines = EnginesSetting('engines', choices=engines)\n self.plugins = PluginsSetting('plugins', choices=plugins)\n self.tokens = SetSetting('tokens')\n self.unknown_params = {}\n\n def get_as_url_params(self):\n \"\"\"Return preferences as URL parameters\"\"\"\n settings_kv = {}\n for k, v in self.key_value_settings.items():\n if isinstance(v, MultipleChoiceSetting):\n settings_kv[k] = ','.join(v.get_value())\n else:\n settings_kv[k] = v.get_value()\n\n settings_kv['disabled_engines'] = ','.join(self.engines.disabled)\n settings_kv['enabled_engines'] = ','.join(self.engines.enabled)\n\n settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)\n settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)\n\n settings_kv['tokens'] = ','.join(self.tokens.values)\n\n return urlsafe_b64encode(compress(urlencode(settings_kv).encode('utf-8'))).decode('utf-8')\n\n def parse_encoded_data(self, input_data):\n \"\"\"parse (base64) preferences from request (``flask.request.form['preferences']``)\"\"\"\n decoded_data = decompress(urlsafe_b64decode(input_data.encode('utf-8')))\n dict_data = {}\n for x, y in parse_qs(decoded_data).items():\n dict_data[x.decode('utf8')] = y[0].decode('utf8')\n self.parse_dict(dict_data)\n\n def parse_dict(self, input_data):\n \"\"\"parse preferences from request (``flask.request.form``)\"\"\"\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name == 'disabled_engines':\n self.engines.parse_cookie((input_data.get('disabled_engines', ''),\n input_data.get('enabled_engines', '')))\n elif user_setting_name == 'disabled_plugins':\n self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),\n input_data.get('enabled_plugins', '')))\n elif user_setting_name == 'tokens':\n self.tokens.parse(user_setting)\n elif not any(user_setting_name.startswith(x) for x in [\n 'enabled_',\n 'disabled_',\n 'engine_',\n 'category_',\n 'plugin_']):\n self.unknown_params[user_setting_name] = user_setting\n\n def parse_form(self, input_data):\n \"\"\"Parse formular (``<input>``) data from a ``flask.request.form``\"\"\"\n disabled_engines = []\n enabled_categories = []\n disabled_plugins = []\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name.startswith('engine_'):\n 
disabled_engines.append(user_setting_name)\n elif user_setting_name.startswith('category_'):\n enabled_categories.append(user_setting_name[len('category_'):])\n elif user_setting_name.startswith('plugin_'):\n disabled_plugins.append(user_setting_name)\n elif user_setting_name == 'tokens':\n self.tokens.parse_form(user_setting)\n else:\n self.unknown_params[user_setting_name] = user_setting\n self.key_value_settings['categories'].parse_form(enabled_categories)\n self.engines.parse_form(disabled_engines)\n self.plugins.parse_form(disabled_plugins)\n\n # cannot be used in case of engines or plugins\n def get_value(self, user_setting_name):\n \"\"\"Returns the value for ``user_setting_name``\n \"\"\"\n ret_val = None\n if user_setting_name in self.key_value_settings:\n ret_val = self.key_value_settings[user_setting_name].get_value()\n if user_setting_name in self.unknown_params:\n ret_val = self.unknown_params[user_setting_name]\n return ret_val\n\n def save(self, resp):\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n for user_setting_name, user_setting in self.key_value_settings.items():\n user_setting.save(user_setting_name, resp)\n self.engines.save(resp)\n self.plugins.save(resp)\n self.tokens.save('tokens', resp)\n for k, v in self.unknown_params.items():\n resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)\n return resp\n\n def validate_token(self, engine): # pylint: disable=missing-function-docstring\n valid = True\n if hasattr(engine, 'tokens') and engine.tokens:\n valid = False\n for token in self.tokens.values:\n if token in engine.tokens:\n valid = True\n break\n\n return valid\n",
"path": "searx/preferences.py"
}
] | diff --git a/searx/preferences.py b/searx/preferences.py
index cb33bc5aa6..82b8f5224d 100644
--- a/searx/preferences.py
+++ b/searx/preferences.py
@@ -364,7 +364,7 @@ def __init__(self, themes, categories, engines, plugins):
choices=themes
),
'results_on_new_tab': MapSetting(
- False,
+ settings['ui'].get('results_on_new_tab', False),
map={
'0': False,
'1': True,
diff --git a/searx/settings.yml b/searx/settings.yml
index 6f9afd1390..d6ea53177d 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -25,6 +25,7 @@ ui:
default_locale : "" # Default interface locale - leave blank to detect from browser information or use codes from the 'locales' config section
theme_args :
oscar_style : logicodev # default style of oscar
+# results_on_new_tab: False # Open result links in a new tab by default
# categories_order :
# - general
# - files
|
encode__django-rest-framework-722 | PrimaryKeyRelatedField with OneToOneField serializes wrong object's id
```
from django.db.models import Model, OneToOneField
from rest_framework.serializers import Serializer, PrimaryKeyRelatedField

class A(Model):
    pass

class B(Model):
    a = OneToOneField('A')

class ASerializer(Serializer):
    b_id = PrimaryKeyRelatedField(source='b', null=True)
```
Now when an `A` instance is serialized, the value of `b_id` is `A`'s own `id` rather than `B`'s. I believe this is due to [this erroneous line in PrimaryKeyRelatedField](https://github.com/tomchristie/django-rest-framework/blob/018298deb89628b39e1caeceecb414c1e27310da/rest_framework/relations.py#L238). Once I remove that line, the correct value for `b_id` is serialized.
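For illustration, here is a rough sketch of the to-one branch of `PrimaryKeyRelatedField.field_to_native` with that line removed (the `many` branch is omitted for brevity; `ObjectDoesNotExist` is the same import already used in `rest_framework/relations.py`). The erroneous `return self.to_native(obj.pk)` returned the parent object's own pk; without it, the pk looked up for the reverse relation falls through to the final `return`:

```
# Excerpt of PrimaryKeyRelatedField.field_to_native (to-one case only),
# sketched with the stray `return self.to_native(obj.pk)` removed.
def field_to_native(self, obj, field_name):
    # To-one relationship
    try:
        # Prefer obj.serializable_value for performance reasons
        pk = obj.serializable_value(self.source or field_name)
    except AttributeError:
        # RelatedObject (reverse relationship), e.g. A -> B via the OneToOneField
        try:
            pk = getattr(obj, self.source or field_name).pk
        except ObjectDoesNotExist:
            return None

    # Forward relationship, or the reverse pk resolved above
    return self.to_native(pk)
```

With this change, serializing the `A` above should yield `B`'s pk for `b_id`, and `None` when no related `B` exists.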
| [
{
"content": "from __future__ import unicode_literals\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.core.urlresolvers import resolve, get_script_prefix, NoReverseMatch\nfrom django import forms\nfrom django.forms import widgets\nfrom django.forms.models import ModelChoiceIterator\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework.fields import Field, WritableField, get_component\nfrom rest_framework.reverse import reverse\nfrom rest_framework.compat import urlparse\nfrom rest_framework.compat import smart_text\nimport warnings\n\n\n##### Relational fields #####\n\n\n# Not actually Writable, but subclasses may need to be.\nclass RelatedField(WritableField):\n \"\"\"\n Base class for related model fields.\n\n This represents a relationship using the unicode representation of the target.\n \"\"\"\n widget = widgets.Select\n many_widget = widgets.SelectMultiple\n form_field_class = forms.ChoiceField\n many_form_field_class = forms.MultipleChoiceField\n\n cache_choices = False\n empty_label = None\n read_only = True\n many = False\n\n def __init__(self, *args, **kwargs):\n\n # 'null' is to be deprecated in favor of 'required'\n if 'null' in kwargs:\n warnings.warn('The `null` keyword argument is due to be deprecated. '\n 'Use the `required` keyword argument instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['required'] = not kwargs.pop('null')\n\n self.queryset = kwargs.pop('queryset', None)\n self.many = kwargs.pop('many', self.many)\n if self.many:\n self.widget = self.many_widget\n self.form_field_class = self.many_form_field_class\n\n kwargs['read_only'] = kwargs.pop('read_only', self.read_only)\n super(RelatedField, self).__init__(*args, **kwargs)\n\n def initialize(self, parent, field_name):\n super(RelatedField, self).initialize(parent, field_name)\n if self.queryset is None and not self.read_only:\n try:\n manager = getattr(self.parent.opts.model, self.source or field_name)\n if hasattr(manager, 'related'): # Forward\n self.queryset = manager.related.model._default_manager.all()\n else: # Reverse\n self.queryset = manager.field.rel.to._default_manager.all()\n except Exception:\n raise\n msg = ('Serializer related fields must include a `queryset`' +\n ' argument or set `read_only=True')\n raise Exception(msg)\n\n ### We need this stuff to make form choices work...\n\n def prepare_value(self, obj):\n return self.to_native(obj)\n\n def label_from_instance(self, obj):\n \"\"\"\n Return a readable representation for use with eg. select widgets.\n \"\"\"\n desc = smart_text(obj)\n ident = smart_text(self.to_native(obj))\n if desc == ident:\n return desc\n return \"%s - %s\" % (desc, ident)\n\n def _get_queryset(self):\n return self._queryset\n\n def _set_queryset(self, queryset):\n self._queryset = queryset\n self.widget.choices = self.choices\n\n queryset = property(_get_queryset, _set_queryset)\n\n def _get_choices(self):\n # If self._choices is set, then somebody must have manually set\n # the property self.choices. In this case, just return self._choices.\n if hasattr(self, '_choices'):\n return self._choices\n\n # Otherwise, execute the QuerySet in self.queryset to determine the\n # choices dynamically. Return a fresh ModelChoiceIterator that has not been\n # consumed. Note that we're instantiating a new ModelChoiceIterator *each*\n # time _get_choices() is called (and, thus, each time self.choices is\n # accessed) so that we can ensure the QuerySet has not been consumed. 
This\n # construct might look complicated but it allows for lazy evaluation of\n # the queryset.\n return ModelChoiceIterator(self)\n\n def _set_choices(self, value):\n # Setting choices also sets the choices on the widget.\n # choices can be any iterable, but we call list() on it because\n # it will be consumed more than once.\n self._choices = self.widget.choices = list(value)\n\n choices = property(_get_choices, _set_choices)\n\n ### Regular serializer stuff...\n\n def field_to_native(self, obj, field_name):\n try:\n if self.source == '*':\n return self.to_native(obj)\n\n source = self.source or field_name\n value = obj\n\n for component in source.split('.'):\n value = get_component(value, component)\n if value is None:\n break\n except ObjectDoesNotExist:\n return None\n\n if value is None:\n return None\n\n if self.many:\n return [self.to_native(item) for item in value.all()]\n return self.to_native(value)\n\n def field_from_native(self, data, files, field_name, into):\n if self.read_only:\n return\n\n try:\n if self.many:\n try:\n # Form data\n value = data.getlist(field_name)\n if value == [''] or value == []:\n raise KeyError\n except AttributeError:\n # Non-form data\n value = data[field_name]\n else:\n value = data[field_name]\n except KeyError:\n if self.partial:\n return\n value = [] if self.many else None\n\n if value in (None, '') and self.required:\n raise ValidationError(self.error_messages['required'])\n elif value in (None, ''):\n into[(self.source or field_name)] = None\n elif self.many:\n into[(self.source or field_name)] = [self.from_native(item) for item in value]\n else:\n into[(self.source or field_name)] = self.from_native(value)\n\n\n### PrimaryKey relationships\n\nclass PrimaryKeyRelatedField(RelatedField):\n \"\"\"\n Represents a relationship as a pk value.\n \"\"\"\n read_only = False\n\n default_error_messages = {\n 'does_not_exist': _(\"Invalid pk '%s' - object does not exist.\"),\n 'incorrect_type': _('Incorrect type. Expected pk value, received %s.'),\n }\n\n # TODO: Remove these field hacks...\n def prepare_value(self, obj):\n return self.to_native(obj.pk)\n\n def label_from_instance(self, obj):\n \"\"\"\n Return a readable representation for use with eg. 
select widgets.\n \"\"\"\n desc = smart_text(obj)\n ident = smart_text(self.to_native(obj.pk))\n if desc == ident:\n return desc\n return \"%s - %s\" % (desc, ident)\n\n # TODO: Possibly change this to just take `obj`, through prob less performant\n def to_native(self, pk):\n return pk\n\n def from_native(self, data):\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n return self.queryset.get(pk=data)\n except ObjectDoesNotExist:\n msg = self.error_messages['does_not_exist'] % smart_text(data)\n raise ValidationError(msg)\n except (TypeError, ValueError):\n received = type(data).__name__\n msg = self.error_messages['incorrect_type'] % received\n raise ValidationError(msg)\n\n def field_to_native(self, obj, field_name):\n if self.many:\n # To-many relationship\n try:\n # Prefer obj.serializable_value for performance reasons\n queryset = obj.serializable_value(self.source or field_name)\n except AttributeError:\n # RelatedManager (reverse relationship)\n queryset = getattr(obj, self.source or field_name)\n\n # Forward relationship\n return [self.to_native(item.pk) for item in queryset.all()]\n\n # To-one relationship\n try:\n # Prefer obj.serializable_value for performance reasons\n pk = obj.serializable_value(self.source or field_name)\n except AttributeError:\n # RelatedObject (reverse relationship)\n try:\n pk = getattr(obj, self.source or field_name).pk\n except ObjectDoesNotExist:\n return None\n return self.to_native(obj.pk)\n\n # Forward relationship\n return self.to_native(pk)\n\n\n### Slug relationships\n\n\nclass SlugRelatedField(RelatedField):\n \"\"\"\n Represents a relationship using a unique field on the target.\n \"\"\"\n read_only = False\n\n default_error_messages = {\n 'does_not_exist': _(\"Object with %s=%s does not exist.\"),\n 'invalid': _('Invalid value.'),\n }\n\n def __init__(self, *args, **kwargs):\n self.slug_field = kwargs.pop('slug_field', None)\n assert self.slug_field, 'slug_field is required'\n super(SlugRelatedField, self).__init__(*args, **kwargs)\n\n def to_native(self, obj):\n return getattr(obj, self.slug_field)\n\n def from_native(self, data):\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n return self.queryset.get(**{self.slug_field: data})\n except ObjectDoesNotExist:\n raise ValidationError(self.error_messages['does_not_exist'] %\n (self.slug_field, smart_text(data)))\n except (TypeError, ValueError):\n msg = self.error_messages['invalid']\n raise ValidationError(msg)\n\n\n### Hyperlinked relationships\n\nclass HyperlinkedRelatedField(RelatedField):\n \"\"\"\n Represents a relationship using hyperlinking.\n \"\"\"\n pk_url_kwarg = 'pk'\n slug_field = 'slug'\n slug_url_kwarg = None # Defaults to same as `slug_field` unless overridden\n read_only = False\n\n default_error_messages = {\n 'no_match': _('Invalid hyperlink - No URL match'),\n 'incorrect_match': _('Invalid hyperlink - Incorrect URL match'),\n 'configuration_error': _('Invalid hyperlink due to configuration error'),\n 'does_not_exist': _(\"Invalid hyperlink - object does not exist.\"),\n 'incorrect_type': _('Incorrect type. 
Expected url string, received %s.'),\n }\n\n def __init__(self, *args, **kwargs):\n try:\n self.view_name = kwargs.pop('view_name')\n except KeyError:\n raise ValueError(\"Hyperlinked field requires 'view_name' kwarg\")\n\n self.slug_field = kwargs.pop('slug_field', self.slug_field)\n default_slug_kwarg = self.slug_url_kwarg or self.slug_field\n self.pk_url_kwarg = kwargs.pop('pk_url_kwarg', self.pk_url_kwarg)\n self.slug_url_kwarg = kwargs.pop('slug_url_kwarg', default_slug_kwarg)\n\n self.format = kwargs.pop('format', None)\n super(HyperlinkedRelatedField, self).__init__(*args, **kwargs)\n\n def get_slug_field(self):\n \"\"\"\n Get the name of a slug field to be used to look up by slug.\n \"\"\"\n return self.slug_field\n\n def to_native(self, obj):\n view_name = self.view_name\n request = self.context.get('request', None)\n format = self.format or self.context.get('format', None)\n\n if request is None:\n warnings.warn(\"Using `HyperlinkedRelatedField` without including the \"\n \"request in the serializer context is due to be deprecated. \"\n \"Add `context={'request': request}` when instantiating the serializer.\",\n PendingDeprecationWarning, stacklevel=4)\n\n pk = getattr(obj, 'pk', None)\n if pk is None:\n return\n kwargs = {self.pk_url_kwarg: pk}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n slug = getattr(obj, self.slug_field, None)\n\n if not slug:\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n kwargs = {self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n kwargs = {self.pk_url_kwarg: obj.pk, self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n def from_native(self, value):\n # Convert URL -> model instance pk\n # TODO: Use values_list\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n http_prefix = value.startswith('http:') or value.startswith('https:')\n except AttributeError:\n msg = self.error_messages['incorrect_type']\n raise ValidationError(msg % type(value).__name__)\n\n if http_prefix:\n # If needed convert absolute URLs to relative path\n value = urlparse.urlparse(value).path\n prefix = get_script_prefix()\n if value.startswith(prefix):\n value = '/' + value[len(prefix):]\n\n try:\n match = resolve(value)\n except Exception:\n raise ValidationError(self.error_messages['no_match'])\n\n if match.view_name != self.view_name:\n raise ValidationError(self.error_messages['incorrect_match'])\n\n pk = match.kwargs.get(self.pk_url_kwarg, None)\n slug = match.kwargs.get(self.slug_url_kwarg, None)\n\n # Try explicit primary key.\n if pk is not None:\n queryset = self.queryset.filter(pk=pk)\n # Next, try looking up by slug.\n elif slug is not None:\n slug_field = self.get_slug_field()\n queryset = self.queryset.filter(**{slug_field: slug})\n # If none of those are defined, it's probably a configuation error.\n else:\n raise ValidationError(self.error_messages['configuration_error'])\n\n try:\n obj = queryset.get()\n except ObjectDoesNotExist:\n raise ValidationError(self.error_messages['does_not_exist'])\n except (TypeError, ValueError):\n msg = self.error_messages['incorrect_type']\n raise ValidationError(msg % 
type(value).__name__)\n\n return obj\n\n\nclass HyperlinkedIdentityField(Field):\n \"\"\"\n Represents the instance, or a property on the instance, using hyperlinking.\n \"\"\"\n pk_url_kwarg = 'pk'\n slug_field = 'slug'\n slug_url_kwarg = None # Defaults to same as `slug_field` unless overridden\n read_only = True\n\n def __init__(self, *args, **kwargs):\n # TODO: Make view_name mandatory, and have the\n # HyperlinkedModelSerializer set it on-the-fly\n self.view_name = kwargs.pop('view_name', None)\n # Optionally the format of the target hyperlink may be specified\n self.format = kwargs.pop('format', None)\n\n self.slug_field = kwargs.pop('slug_field', self.slug_field)\n default_slug_kwarg = self.slug_url_kwarg or self.slug_field\n self.pk_url_kwarg = kwargs.pop('pk_url_kwarg', self.pk_url_kwarg)\n self.slug_url_kwarg = kwargs.pop('slug_url_kwarg', default_slug_kwarg)\n\n super(HyperlinkedIdentityField, self).__init__(*args, **kwargs)\n\n def field_to_native(self, obj, field_name):\n request = self.context.get('request', None)\n format = self.context.get('format', None)\n view_name = self.view_name or self.parent.opts.view_name\n kwargs = {self.pk_url_kwarg: obj.pk}\n\n if request is None:\n warnings.warn(\"Using `HyperlinkedIdentityField` without including the \"\n \"request in the serializer context is due to be deprecated. \"\n \"Add `context={'request': request}` when instantiating the serializer.\",\n PendingDeprecationWarning, stacklevel=4)\n\n # By default use whatever format is given for the current context\n # unless the target is a different type to the source.\n #\n # Eg. Consider a HyperlinkedIdentityField pointing from a json\n # representation to an html property of that representation...\n #\n # '/snippets/1/' should link to '/snippets/1/highlight/'\n # ...but...\n # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'\n if format and self.format and self.format != format:\n format = self.format\n\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n slug = getattr(obj, self.slug_field, None)\n\n if not slug:\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n kwargs = {self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n kwargs = {self.pk_url_kwarg: obj.pk, self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n\n### Old-style many classes for backwards compat\n\nclass ManyRelatedField(RelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyRelatedField()` is due to be deprecated. '\n 'Use `RelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManyPrimaryKeyRelatedField(PrimaryKeyRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyPrimaryKeyRelatedField()` is due to be deprecated. '\n 'Use `PrimaryKeyRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyPrimaryKeyRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManySlugRelatedField(SlugRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManySlugRelatedField()` is due to be deprecated. 
'\n 'Use `SlugRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManySlugRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManyHyperlinkedRelatedField(HyperlinkedRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyHyperlinkedRelatedField()` is due to be deprecated. '\n 'Use `HyperlinkedRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyHyperlinkedRelatedField, self).__init__(*args, **kwargs)\n",
"path": "rest_framework/relations.py"
}
] | [
{
"content": "from __future__ import unicode_literals\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.core.urlresolvers import resolve, get_script_prefix, NoReverseMatch\nfrom django import forms\nfrom django.forms import widgets\nfrom django.forms.models import ModelChoiceIterator\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework.fields import Field, WritableField, get_component\nfrom rest_framework.reverse import reverse\nfrom rest_framework.compat import urlparse\nfrom rest_framework.compat import smart_text\nimport warnings\n\n\n##### Relational fields #####\n\n\n# Not actually Writable, but subclasses may need to be.\nclass RelatedField(WritableField):\n \"\"\"\n Base class for related model fields.\n\n This represents a relationship using the unicode representation of the target.\n \"\"\"\n widget = widgets.Select\n many_widget = widgets.SelectMultiple\n form_field_class = forms.ChoiceField\n many_form_field_class = forms.MultipleChoiceField\n\n cache_choices = False\n empty_label = None\n read_only = True\n many = False\n\n def __init__(self, *args, **kwargs):\n\n # 'null' is to be deprecated in favor of 'required'\n if 'null' in kwargs:\n warnings.warn('The `null` keyword argument is due to be deprecated. '\n 'Use the `required` keyword argument instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['required'] = not kwargs.pop('null')\n\n self.queryset = kwargs.pop('queryset', None)\n self.many = kwargs.pop('many', self.many)\n if self.many:\n self.widget = self.many_widget\n self.form_field_class = self.many_form_field_class\n\n kwargs['read_only'] = kwargs.pop('read_only', self.read_only)\n super(RelatedField, self).__init__(*args, **kwargs)\n\n def initialize(self, parent, field_name):\n super(RelatedField, self).initialize(parent, field_name)\n if self.queryset is None and not self.read_only:\n try:\n manager = getattr(self.parent.opts.model, self.source or field_name)\n if hasattr(manager, 'related'): # Forward\n self.queryset = manager.related.model._default_manager.all()\n else: # Reverse\n self.queryset = manager.field.rel.to._default_manager.all()\n except Exception:\n raise\n msg = ('Serializer related fields must include a `queryset`' +\n ' argument or set `read_only=True')\n raise Exception(msg)\n\n ### We need this stuff to make form choices work...\n\n def prepare_value(self, obj):\n return self.to_native(obj)\n\n def label_from_instance(self, obj):\n \"\"\"\n Return a readable representation for use with eg. select widgets.\n \"\"\"\n desc = smart_text(obj)\n ident = smart_text(self.to_native(obj))\n if desc == ident:\n return desc\n return \"%s - %s\" % (desc, ident)\n\n def _get_queryset(self):\n return self._queryset\n\n def _set_queryset(self, queryset):\n self._queryset = queryset\n self.widget.choices = self.choices\n\n queryset = property(_get_queryset, _set_queryset)\n\n def _get_choices(self):\n # If self._choices is set, then somebody must have manually set\n # the property self.choices. In this case, just return self._choices.\n if hasattr(self, '_choices'):\n return self._choices\n\n # Otherwise, execute the QuerySet in self.queryset to determine the\n # choices dynamically. Return a fresh ModelChoiceIterator that has not been\n # consumed. Note that we're instantiating a new ModelChoiceIterator *each*\n # time _get_choices() is called (and, thus, each time self.choices is\n # accessed) so that we can ensure the QuerySet has not been consumed. 
This\n # construct might look complicated but it allows for lazy evaluation of\n # the queryset.\n return ModelChoiceIterator(self)\n\n def _set_choices(self, value):\n # Setting choices also sets the choices on the widget.\n # choices can be any iterable, but we call list() on it because\n # it will be consumed more than once.\n self._choices = self.widget.choices = list(value)\n\n choices = property(_get_choices, _set_choices)\n\n ### Regular serializer stuff...\n\n def field_to_native(self, obj, field_name):\n try:\n if self.source == '*':\n return self.to_native(obj)\n\n source = self.source or field_name\n value = obj\n\n for component in source.split('.'):\n value = get_component(value, component)\n if value is None:\n break\n except ObjectDoesNotExist:\n return None\n\n if value is None:\n return None\n\n if self.many:\n return [self.to_native(item) for item in value.all()]\n return self.to_native(value)\n\n def field_from_native(self, data, files, field_name, into):\n if self.read_only:\n return\n\n try:\n if self.many:\n try:\n # Form data\n value = data.getlist(field_name)\n if value == [''] or value == []:\n raise KeyError\n except AttributeError:\n # Non-form data\n value = data[field_name]\n else:\n value = data[field_name]\n except KeyError:\n if self.partial:\n return\n value = [] if self.many else None\n\n if value in (None, '') and self.required:\n raise ValidationError(self.error_messages['required'])\n elif value in (None, ''):\n into[(self.source or field_name)] = None\n elif self.many:\n into[(self.source or field_name)] = [self.from_native(item) for item in value]\n else:\n into[(self.source or field_name)] = self.from_native(value)\n\n\n### PrimaryKey relationships\n\nclass PrimaryKeyRelatedField(RelatedField):\n \"\"\"\n Represents a relationship as a pk value.\n \"\"\"\n read_only = False\n\n default_error_messages = {\n 'does_not_exist': _(\"Invalid pk '%s' - object does not exist.\"),\n 'incorrect_type': _('Incorrect type. Expected pk value, received %s.'),\n }\n\n # TODO: Remove these field hacks...\n def prepare_value(self, obj):\n return self.to_native(obj.pk)\n\n def label_from_instance(self, obj):\n \"\"\"\n Return a readable representation for use with eg. 
select widgets.\n \"\"\"\n desc = smart_text(obj)\n ident = smart_text(self.to_native(obj.pk))\n if desc == ident:\n return desc\n return \"%s - %s\" % (desc, ident)\n\n # TODO: Possibly change this to just take `obj`, through prob less performant\n def to_native(self, pk):\n return pk\n\n def from_native(self, data):\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n return self.queryset.get(pk=data)\n except ObjectDoesNotExist:\n msg = self.error_messages['does_not_exist'] % smart_text(data)\n raise ValidationError(msg)\n except (TypeError, ValueError):\n received = type(data).__name__\n msg = self.error_messages['incorrect_type'] % received\n raise ValidationError(msg)\n\n def field_to_native(self, obj, field_name):\n if self.many:\n # To-many relationship\n try:\n # Prefer obj.serializable_value for performance reasons\n queryset = obj.serializable_value(self.source or field_name)\n except AttributeError:\n # RelatedManager (reverse relationship)\n queryset = getattr(obj, self.source or field_name)\n\n # Forward relationship\n return [self.to_native(item.pk) for item in queryset.all()]\n\n # To-one relationship\n try:\n # Prefer obj.serializable_value for performance reasons\n pk = obj.serializable_value(self.source or field_name)\n except AttributeError:\n # RelatedObject (reverse relationship)\n try:\n pk = getattr(obj, self.source or field_name).pk\n except ObjectDoesNotExist:\n return None\n\n # Forward relationship\n return self.to_native(pk)\n\n\n### Slug relationships\n\n\nclass SlugRelatedField(RelatedField):\n \"\"\"\n Represents a relationship using a unique field on the target.\n \"\"\"\n read_only = False\n\n default_error_messages = {\n 'does_not_exist': _(\"Object with %s=%s does not exist.\"),\n 'invalid': _('Invalid value.'),\n }\n\n def __init__(self, *args, **kwargs):\n self.slug_field = kwargs.pop('slug_field', None)\n assert self.slug_field, 'slug_field is required'\n super(SlugRelatedField, self).__init__(*args, **kwargs)\n\n def to_native(self, obj):\n return getattr(obj, self.slug_field)\n\n def from_native(self, data):\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n return self.queryset.get(**{self.slug_field: data})\n except ObjectDoesNotExist:\n raise ValidationError(self.error_messages['does_not_exist'] %\n (self.slug_field, smart_text(data)))\n except (TypeError, ValueError):\n msg = self.error_messages['invalid']\n raise ValidationError(msg)\n\n\n### Hyperlinked relationships\n\nclass HyperlinkedRelatedField(RelatedField):\n \"\"\"\n Represents a relationship using hyperlinking.\n \"\"\"\n pk_url_kwarg = 'pk'\n slug_field = 'slug'\n slug_url_kwarg = None # Defaults to same as `slug_field` unless overridden\n read_only = False\n\n default_error_messages = {\n 'no_match': _('Invalid hyperlink - No URL match'),\n 'incorrect_match': _('Invalid hyperlink - Incorrect URL match'),\n 'configuration_error': _('Invalid hyperlink due to configuration error'),\n 'does_not_exist': _(\"Invalid hyperlink - object does not exist.\"),\n 'incorrect_type': _('Incorrect type. 
Expected url string, received %s.'),\n }\n\n def __init__(self, *args, **kwargs):\n try:\n self.view_name = kwargs.pop('view_name')\n except KeyError:\n raise ValueError(\"Hyperlinked field requires 'view_name' kwarg\")\n\n self.slug_field = kwargs.pop('slug_field', self.slug_field)\n default_slug_kwarg = self.slug_url_kwarg or self.slug_field\n self.pk_url_kwarg = kwargs.pop('pk_url_kwarg', self.pk_url_kwarg)\n self.slug_url_kwarg = kwargs.pop('slug_url_kwarg', default_slug_kwarg)\n\n self.format = kwargs.pop('format', None)\n super(HyperlinkedRelatedField, self).__init__(*args, **kwargs)\n\n def get_slug_field(self):\n \"\"\"\n Get the name of a slug field to be used to look up by slug.\n \"\"\"\n return self.slug_field\n\n def to_native(self, obj):\n view_name = self.view_name\n request = self.context.get('request', None)\n format = self.format or self.context.get('format', None)\n\n if request is None:\n warnings.warn(\"Using `HyperlinkedRelatedField` without including the \"\n \"request in the serializer context is due to be deprecated. \"\n \"Add `context={'request': request}` when instantiating the serializer.\",\n PendingDeprecationWarning, stacklevel=4)\n\n pk = getattr(obj, 'pk', None)\n if pk is None:\n return\n kwargs = {self.pk_url_kwarg: pk}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n slug = getattr(obj, self.slug_field, None)\n\n if not slug:\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n kwargs = {self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n kwargs = {self.pk_url_kwarg: obj.pk, self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n def from_native(self, value):\n # Convert URL -> model instance pk\n # TODO: Use values_list\n if self.queryset is None:\n raise Exception('Writable related fields must include a `queryset` argument')\n\n try:\n http_prefix = value.startswith('http:') or value.startswith('https:')\n except AttributeError:\n msg = self.error_messages['incorrect_type']\n raise ValidationError(msg % type(value).__name__)\n\n if http_prefix:\n # If needed convert absolute URLs to relative path\n value = urlparse.urlparse(value).path\n prefix = get_script_prefix()\n if value.startswith(prefix):\n value = '/' + value[len(prefix):]\n\n try:\n match = resolve(value)\n except Exception:\n raise ValidationError(self.error_messages['no_match'])\n\n if match.view_name != self.view_name:\n raise ValidationError(self.error_messages['incorrect_match'])\n\n pk = match.kwargs.get(self.pk_url_kwarg, None)\n slug = match.kwargs.get(self.slug_url_kwarg, None)\n\n # Try explicit primary key.\n if pk is not None:\n queryset = self.queryset.filter(pk=pk)\n # Next, try looking up by slug.\n elif slug is not None:\n slug_field = self.get_slug_field()\n queryset = self.queryset.filter(**{slug_field: slug})\n # If none of those are defined, it's probably a configuation error.\n else:\n raise ValidationError(self.error_messages['configuration_error'])\n\n try:\n obj = queryset.get()\n except ObjectDoesNotExist:\n raise ValidationError(self.error_messages['does_not_exist'])\n except (TypeError, ValueError):\n msg = self.error_messages['incorrect_type']\n raise ValidationError(msg % 
type(value).__name__)\n\n return obj\n\n\nclass HyperlinkedIdentityField(Field):\n \"\"\"\n Represents the instance, or a property on the instance, using hyperlinking.\n \"\"\"\n pk_url_kwarg = 'pk'\n slug_field = 'slug'\n slug_url_kwarg = None # Defaults to same as `slug_field` unless overridden\n read_only = True\n\n def __init__(self, *args, **kwargs):\n # TODO: Make view_name mandatory, and have the\n # HyperlinkedModelSerializer set it on-the-fly\n self.view_name = kwargs.pop('view_name', None)\n # Optionally the format of the target hyperlink may be specified\n self.format = kwargs.pop('format', None)\n\n self.slug_field = kwargs.pop('slug_field', self.slug_field)\n default_slug_kwarg = self.slug_url_kwarg or self.slug_field\n self.pk_url_kwarg = kwargs.pop('pk_url_kwarg', self.pk_url_kwarg)\n self.slug_url_kwarg = kwargs.pop('slug_url_kwarg', default_slug_kwarg)\n\n super(HyperlinkedIdentityField, self).__init__(*args, **kwargs)\n\n def field_to_native(self, obj, field_name):\n request = self.context.get('request', None)\n format = self.context.get('format', None)\n view_name = self.view_name or self.parent.opts.view_name\n kwargs = {self.pk_url_kwarg: obj.pk}\n\n if request is None:\n warnings.warn(\"Using `HyperlinkedIdentityField` without including the \"\n \"request in the serializer context is due to be deprecated. \"\n \"Add `context={'request': request}` when instantiating the serializer.\",\n PendingDeprecationWarning, stacklevel=4)\n\n # By default use whatever format is given for the current context\n # unless the target is a different type to the source.\n #\n # Eg. Consider a HyperlinkedIdentityField pointing from a json\n # representation to an html property of that representation...\n #\n # '/snippets/1/' should link to '/snippets/1/highlight/'\n # ...but...\n # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'\n if format and self.format and self.format != format:\n format = self.format\n\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n slug = getattr(obj, self.slug_field, None)\n\n if not slug:\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n kwargs = {self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n kwargs = {self.pk_url_kwarg: obj.pk, self.slug_url_kwarg: slug}\n try:\n return reverse(view_name, kwargs=kwargs, request=request, format=format)\n except NoReverseMatch:\n pass\n\n raise Exception('Could not resolve URL for field using view name \"%s\"' % view_name)\n\n\n### Old-style many classes for backwards compat\n\nclass ManyRelatedField(RelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyRelatedField()` is due to be deprecated. '\n 'Use `RelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManyPrimaryKeyRelatedField(PrimaryKeyRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyPrimaryKeyRelatedField()` is due to be deprecated. '\n 'Use `PrimaryKeyRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyPrimaryKeyRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManySlugRelatedField(SlugRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManySlugRelatedField()` is due to be deprecated. 
'\n 'Use `SlugRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManySlugRelatedField, self).__init__(*args, **kwargs)\n\n\nclass ManyHyperlinkedRelatedField(HyperlinkedRelatedField):\n def __init__(self, *args, **kwargs):\n warnings.warn('`ManyHyperlinkedRelatedField()` is due to be deprecated. '\n 'Use `HyperlinkedRelatedField(many=True)` instead.',\n PendingDeprecationWarning, stacklevel=2)\n kwargs['many'] = True\n super(ManyHyperlinkedRelatedField, self).__init__(*args, **kwargs)\n",
"path": "rest_framework/relations.py"
}
] | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
index 0c108717fc..2a10e9af53 100644
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -235,7 +235,6 @@ def field_to_native(self, obj, field_name):
pk = getattr(obj, self.source or field_name).pk
except ObjectDoesNotExist:
return None
- return self.to_native(obj.pk)
# Forward relationship
return self.to_native(pk)
diff --git a/rest_framework/tests/relations_pk.py b/rest_framework/tests/relations_pk.py
index d6ae317607..f08e18086a 100644
--- a/rest_framework/tests/relations_pk.py
+++ b/rest_framework/tests/relations_pk.py
@@ -407,14 +407,14 @@ def setUp(self):
target.save()
new_target = OneToOneTarget(name='target-2')
new_target.save()
- source = NullableOneToOneSource(name='source-1', target=target)
+ source = NullableOneToOneSource(name='source-1', target=new_target)
source.save()
def test_reverse_foreign_key_retrieve_with_null(self):
queryset = OneToOneTarget.objects.all()
serializer = NullableOneToOneTargetSerializer(queryset, many=True)
expected = [
- {'id': 1, 'name': 'target-1', 'nullable_source': 1},
- {'id': 2, 'name': 'target-2', 'nullable_source': None},
+ {'id': 1, 'name': 'target-1', 'nullable_source': None},
+ {'id': 2, 'name': 'target-2', 'nullable_source': 1},
]
self.assertEqual(serializer.data, expected)
|
django-cms__django-cms-1768 | cms.utils.i18n.get_fallback_languages may fail if LANGUAGES has more languages than CMS_LANGUAGES
Reported via IRC.
Use case: Show the admin interface in English (for users that configured their browser to English), but CMS pages should only be in German or French.
In the use case above, settings might be something like:
``` python
LANGUAGES = [
('en', 'English'),
('de', 'Deutsch'),
('fr', 'French'),
]
CMS_LANGUAGES = {
1: [
{
'code': 'de',
'name': gettext('Deutsch'),
'public': True,
},
{
'code': 'fr',
'name': gettext('French'),
'fallbacks': ['de',],
'public': False,
},
],
'default': {
'fallbacks': ['de',],
'redirect_on_fallback':True,
'public': False,
'hide_untranslated': False,
}
}
```
`'en'` is in `LANGUAGES` but not in `CMS_LANGUAGES`.
Now if `cms.utils.i18n.get_fallback_languages` is called with `'en'` as its argument, it raises a `LanguageError` because `'en'` is not available to the CMS. This happens, for example, when you add a page in the admin while the admin UI is in English: the add view logs the page change in the currently active language rather than the language of the page, which triggers a call to `get_fallback_languages` with English as the argument.
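A minimal reproduction sketch, assuming a Django project configured with the settings above (the `site_id` value and the project setup are assumptions):
```python
# Hypothetical reproduction, assuming Django is configured with the
# LANGUAGES / CMS_LANGUAGES settings shown above for site 1.
from cms.utils.i18n import get_fallback_languages

# 'en' is in LANGUAGES but not in CMS_LANGUAGES, so looking it up raises
# cms.exceptions.LanguageError instead of returning a fallback list.
get_fallback_languages('en', 1)
```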
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom contextlib import contextmanager\n\nfrom django.core.urlresolvers import get_resolver, LocaleRegexURLResolver\nfrom django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.exceptions import LanguageError\nfrom cms.utils.conf import get_cms_setting\n\n\n@contextmanager\ndef force_language(new_lang):\n old_lang = get_current_language()\n if old_lang != new_lang:\n translation.activate(new_lang)\n yield\n translation.activate(old_lang)\n\n\ndef get_languages(site_id=None):\n site_id = get_site(site_id)\n result = get_cms_setting('LANGUAGES').get(site_id)\n if not result:\n result = []\n defaults = get_cms_setting('LANGUAGES').get('default', {})\n for code, name in settings.LANGUAGES:\n lang = {'code': code, 'name': _(name)}\n lang.update(defaults)\n result.append(lang)\n get_cms_setting('LANGUAGES')[site_id] = result\n return result\n\n\ndef get_language_code(language_code):\n \"\"\"\n Returns language code while making sure it's in LANGUAGES\n \"\"\"\n if not language_code:\n return None\n languages = get_language_list()\n if language_code in languages: # direct hit\n return language_code\n for lang in languages:\n if language_code.split('-')[0] == lang: # base language hit\n return lang\n if lang.split('-')[0] == language_code: # base language hit\n return lang\n return language_code\n\n\ndef get_current_language():\n \"\"\"\n Returns the currently active language\n\n It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.\n Overcomes this issue: https://code.djangoproject.com/ticket/9340\n \"\"\"\n language_code = translation.get_language()\n return get_language_code(language_code)\n\n\ndef get_site(site):\n if site is None:\n return settings.SITE_ID\n else:\n try:\n return int(site)\n except TypeError:\n return site.pk\n\n\ndef get_language_list(site_id=None):\n \"\"\"\n :return: returns a list of iso2codes for this site\n \"\"\"\n if not settings.USE_I18N:\n return [settings.LANGUAGE_CODE]\n languages = []\n for language in get_languages(site_id):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_tuple(site_id=None):\n \"\"\"\n :return: returns an list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site\n \"\"\"\n languages = []\n for language in get_languages(site_id):\n languages.append((language['code'], language['name']))\n return languages\n\n\ndef get_language_dict(site_id=None):\n \"\"\"\n :return: returns an dict of cms languages\n \"\"\"\n languages = {}\n for language in get_languages(site_id):\n languages[language['code']] = language['name']\n return languages\n\n\ndef get_public_languages(site_id=None):\n \"\"\"\n :return: list of iso2codes of public languages for this site\n \"\"\"\n languages = []\n for language in get_language_objects(site_id):\n if language.get(\"public\", True):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_object(language_code, site_id=None):\n \"\"\"\n :param language_code: RFC5646 language code\n :return: the language object filled up by defaults\n \"\"\"\n for language in get_languages(site_id):\n if language['code'] == get_language_code(language_code):\n return language\n raise LanguageError('Language not found: %s' % language_code)\n\n\ndef get_language_objects(site_id=None):\n \"\"\"\n returns list of all language objects filled up by default values\n \"\"\"\n return 
list(get_languages(site_id))\n\n\ndef get_default_language(language_code=None, site_id=None):\n \"\"\"\n Returns default language depending on settings.LANGUAGE_CODE merged with\n best match from get_cms_setting('LANGUAGES')\n\n Returns: language_code\n \"\"\"\n\n if not language_code:\n language_code = get_language_code(settings.LANGUAGE_CODE)\n\n languages = get_language_list(site_id)\n\n # first try if there is an exact language\n if language_code in languages:\n return language_code\n\n # otherwise split the language code if possible, so iso3\n language_code = language_code.split(\"-\")[0]\n\n if not language_code in languages:\n return settings.LANGUAGE_CODE\n\n return language_code\n\n\ndef get_fallback_languages(language, site_id=None):\n \"\"\"\n returns a list of fallback languages for the given language\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('fallbacks', [])\n\n\ndef get_redirect_on_fallback(language, site_id=None):\n \"\"\"\n returns if you should redirect on language fallback\n :param language:\n :param site_id:\n :return: Boolean\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('redirect_on_fallback', True)\n\n\ndef hide_untranslated(language, site_id=None):\n \"\"\"\n Should untranslated pages in this language be hidden?\n :param language:\n :param site_id:\n :return: A Boolean\n \"\"\"\n obj = get_language_object(language, site_id)\n return obj.get('hide_untranslated', True)\n\n\ndef is_language_prefix_patterns_used():\n \"\"\"\n Returns `True` if the `LocaleRegexURLResolver` is used\n at root level of the urlpatterns, else it returns `False`.\n \"\"\"\n for url_pattern in get_resolver(None).url_patterns:\n if isinstance(url_pattern, LocaleRegexURLResolver):\n return True\n return False\n",
"path": "cms/utils/i18n.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom contextlib import contextmanager\n\nfrom django.core.urlresolvers import get_resolver, LocaleRegexURLResolver\nfrom django.conf import settings\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.exceptions import LanguageError\nfrom cms.utils.conf import get_cms_setting\n\n\n@contextmanager\ndef force_language(new_lang):\n old_lang = get_current_language()\n if old_lang != new_lang:\n translation.activate(new_lang)\n yield\n translation.activate(old_lang)\n\n\ndef get_languages(site_id=None):\n site_id = get_site(site_id)\n result = get_cms_setting('LANGUAGES').get(site_id)\n if not result:\n result = []\n defaults = get_cms_setting('LANGUAGES').get('default', {})\n for code, name in settings.LANGUAGES:\n lang = {'code': code, 'name': _(name)}\n lang.update(defaults)\n result.append(lang)\n get_cms_setting('LANGUAGES')[site_id] = result\n return result\n\n\ndef get_language_code(language_code):\n \"\"\"\n Returns language code while making sure it's in LANGUAGES\n \"\"\"\n if not language_code:\n return None\n languages = get_language_list()\n if language_code in languages: # direct hit\n return language_code\n for lang in languages:\n if language_code.split('-')[0] == lang: # base language hit\n return lang\n if lang.split('-')[0] == language_code: # base language hit\n return lang\n return language_code\n\n\ndef get_current_language():\n \"\"\"\n Returns the currently active language\n\n It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.\n Overcomes this issue: https://code.djangoproject.com/ticket/9340\n \"\"\"\n language_code = translation.get_language()\n return get_language_code(language_code)\n\n\ndef get_site(site):\n if site is None:\n return settings.SITE_ID\n else:\n try:\n return int(site)\n except TypeError:\n return site.pk\n\n\ndef get_language_list(site_id=None):\n \"\"\"\n :return: returns a list of iso2codes for this site\n \"\"\"\n if not settings.USE_I18N:\n return [settings.LANGUAGE_CODE]\n languages = []\n for language in get_languages(site_id):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_tuple(site_id=None):\n \"\"\"\n :return: returns an list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site\n \"\"\"\n languages = []\n for language in get_languages(site_id):\n languages.append((language['code'], language['name']))\n return languages\n\n\ndef get_language_dict(site_id=None):\n \"\"\"\n :return: returns an dict of cms languages\n \"\"\"\n languages = {}\n for language in get_languages(site_id):\n languages[language['code']] = language['name']\n return languages\n\n\ndef get_public_languages(site_id=None):\n \"\"\"\n :return: list of iso2codes of public languages for this site\n \"\"\"\n languages = []\n for language in get_language_objects(site_id):\n if language.get(\"public\", True):\n languages.append(language['code'])\n return languages\n\n\ndef get_language_object(language_code, site_id=None):\n \"\"\"\n :param language_code: RFC5646 language code\n :return: the language object filled up by defaults\n \"\"\"\n for language in get_languages(site_id):\n if language['code'] == get_language_code(language_code):\n return language\n raise LanguageError('Language not found: %s' % language_code)\n\n\ndef get_language_objects(site_id=None):\n \"\"\"\n returns list of all language objects filled up by default values\n \"\"\"\n return 
list(get_languages(site_id))\n\n\ndef get_default_language(language_code=None, site_id=None):\n \"\"\"\n Returns default language depending on settings.LANGUAGE_CODE merged with\n best match from get_cms_setting('LANGUAGES')\n\n Returns: language_code\n \"\"\"\n\n if not language_code:\n language_code = get_language_code(settings.LANGUAGE_CODE)\n\n languages = get_language_list(site_id)\n\n # first try if there is an exact language\n if language_code in languages:\n return language_code\n\n # otherwise split the language code if possible, so iso3\n language_code = language_code.split(\"-\")[0]\n\n if not language_code in languages:\n return settings.LANGUAGE_CODE\n\n return language_code\n\n\ndef get_fallback_languages(language, site_id=None):\n \"\"\"\n returns a list of fallback languages for the given language\n \"\"\"\n try:\n language = get_language_object(language, site_id)\n except LanguageError:\n language = get_languages(site_id)[0]\n return language.get('fallbacks', [])\n\n\ndef get_redirect_on_fallback(language, site_id=None):\n \"\"\"\n returns if you should redirect on language fallback\n :param language:\n :param site_id:\n :return: Boolean\n \"\"\"\n language = get_language_object(language, site_id)\n return language.get('redirect_on_fallback', True)\n\n\ndef hide_untranslated(language, site_id=None):\n \"\"\"\n Should untranslated pages in this language be hidden?\n :param language:\n :param site_id:\n :return: A Boolean\n \"\"\"\n obj = get_language_object(language, site_id)\n return obj.get('hide_untranslated', True)\n\n\ndef is_language_prefix_patterns_used():\n \"\"\"\n Returns `True` if the `LocaleRegexURLResolver` is used\n at root level of the urlpatterns, else it returns `False`.\n \"\"\"\n for url_pattern in get_resolver(None).url_patterns:\n if isinstance(url_pattern, LocaleRegexURLResolver):\n return True\n return False\n",
"path": "cms/utils/i18n.py"
}
] | diff --git a/cms/tests/i18n.py b/cms/tests/i18n.py
index 579224462a1..8f10c1815fa 100644
--- a/cms/tests/i18n.py
+++ b/cms/tests/i18n.py
@@ -1,5 +1,7 @@
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.utils import i18n
+from cms.utils.i18n import get_fallback_languages
+
class TestLanguages(SettingsOverrideTestCase):
@@ -269,3 +271,36 @@ def test_get_languages_undefined_site(self):
for lang in result:
self.assertEqual(lang['public'], True)
self.assertEqual(lang['hide_untranslated'], False)
+
+
+class TestLanguagesNotInCMSLanguages(SettingsOverrideTestCase):
+ settings_overrides = {
+ 'LANGUAGE_CODE': 'en',
+ 'LANGUAGES': [
+ ('en', 'English'),
+ ('de', 'German'),
+ ('fr', 'French')
+ ],
+ 'CMS_LANGUAGES': {
+ 1: [
+ {
+ 'code': 'de',
+ 'name': 'German',
+ 'public': True,
+ },
+ {
+ 'code': 'fr',
+ 'name': 'French',
+ 'public': True
+ }
+ ],
+ 'default': {
+ 'fallbacks': ['de', 'fr'],
+ }
+ },
+ 'SITE_ID': 1,
+ }
+
+ def test_get_fallback_languages(self):
+ languages = get_fallback_languages('en', 1)
+ self.assertEqual(languages, ['de', 'fr'])
diff --git a/cms/utils/i18n.py b/cms/utils/i18n.py
index 68868947a75..ed5f6165168 100644
--- a/cms/utils/i18n.py
+++ b/cms/utils/i18n.py
@@ -162,7 +162,10 @@ def get_fallback_languages(language, site_id=None):
"""
returns a list of fallback languages for the given language
"""
- language = get_language_object(language, site_id)
+ try:
+ language = get_language_object(language, site_id)
+ except LanguageError:
+ language = get_languages(site_id)[0]
return language.get('fallbacks', [])
|
spack__spack-20572 | improve installation of Zoltran: imposing +int64 constrains on parmetis
<!--*Please add a concise summary of your suggestion here.*-->
### Rationale
The zoltan spec has a variant called `int64` which imposes the corresponding constraint on metis.
https://github.com/spack/spack/blob/6947951aaf9954b1dfd12ca7a9266d7335f07105/var/spack/repos/builtin/packages/zoltan/package.py#L37-L44
The same constraint must be applied to parmetis.
<!--*Is your feature request related to a problem? Please describe it!*-->
### Description
I guess a solution could be something like:
```
depends_on('parmetis@4:', when='+parmetis')
depends_on('parmetis@4: +int64', when='+parmetis+int64')
```
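For reference, a fuller sketch of how the dependency stanza could read, simply mirroring the existing metis lines (this only illustrates the suggestion above; it is not a confirmed patch):
```python
# Possible zoltan package.py dependency stanza; the int64-aware parmetis line
# mirrors the existing metis+int64 handling.
depends_on('parmetis@4: +int64', when='+parmetis+int64')
depends_on('parmetis@4:', when='+parmetis')
depends_on('metis+int64', when='+parmetis+int64')
depends_on('metis', when='+parmetis')
```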
<!--*Describe the solution you'd like and the alternatives you have considered.*-->
### Additional information
<!--*Add any other context about the feature request here.*-->
I guess this happens because the parmetis package was recently updated and `int64` was added. Since parmetis had no such option for a long time, people came up with a workaround of specifying `metis+int64` explicitly in their scripts. The parmetis update now introduces an inconsistency: `int64` is off by default in parmetis, yet the "legacy" workaround still imposes `int64` on metis.
My spack version is 0.16.0
### General information
- [x] I have run `spack --version` and reported the version of Spack
- [x] I have searched the issues of this repo and believe this is not a duplicate
<!--If you want to ask a question about the tool (how to use it, what it can currently do, etc.), try the `#general` channel on our Slack first. We have a welcoming community and chances are you'll get your reply faster and without opening an issue.
Other than that, thanks for taking the time to contribute to Spack!
-->
| [
{
"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack import *\nimport re\n\n\nclass Zoltan(AutotoolsPackage):\n \"\"\"The Zoltan library is a toolkit of parallel combinatorial algorithms\n for parallel, unstructured, and/or adaptive scientific\n applications. Zoltan's largest component is a suite of dynamic\n load-balancing and partitioning algorithms that increase\n applications' parallel performance by reducing idle time. Zoltan\n also has graph coloring and graph ordering algorithms, which are\n useful in task schedulers and parallel preconditioners.\n\n \"\"\"\n\n homepage = \"http://www.cs.sandia.gov/zoltan\"\n url = \"http://www.cs.sandia.gov/~kddevin/Zoltan_Distributions/zoltan_distrib_v3.83.tar.gz\"\n\n version('3.83', sha256='d0d78fdeab7a385c87d3666b8a8dc748994ff04d3fd846872a4845e12d79c1bb')\n version('3.8', sha256='5bdd46548fb9c73b225bbcf3d206c558c318cb292f0b19645e536315d14aafb7')\n version('3.6', sha256='d2cb41e5fb72ca564b24bc5f21d82d9f7992f2c977bc82b243a01a8a8ee4eb9c')\n version('3.3', sha256='8a90585674ab1bbd011dab29f778b9816519712c78d0aab4cdde9c68f02b30dc')\n\n patch('notparallel.patch', when='@3.8')\n\n variant('debug', default=False, description='Builds a debug version of the library.')\n variant('shared', default=True, description='Builds a shared version of the library.')\n\n variant('fortran', default=True, description='Enable Fortran support.')\n variant('mpi', default=True, description='Enable MPI support.')\n variant('parmetis', default=False, description='Enable ParMETIS support.')\n variant('int64', default=False, description='Enable 64bit indices.')\n\n depends_on('mpi', when='+mpi')\n\n depends_on('parmetis@4:', when='+parmetis')\n depends_on('metis+int64', when='+parmetis+int64')\n depends_on('metis', when='+parmetis')\n\n depends_on('perl@:5.21', type='build', when='@:3.6')\n depends_on('autoconf', type='build')\n depends_on('automake', type='build')\n depends_on('m4', type='build')\n\n conflicts('+parmetis', when='~mpi')\n\n build_directory = 'spack-build'\n\n @property\n def configure_directory(self):\n spec = self.spec\n\n # FIXME: The older Zoltan versions fail to compile the F90 MPI wrappers\n # because of some complicated generic type problem.\n if spec.satisfies('@:3.6+fortran+mpi'):\n raise RuntimeError(('Cannot build Zoltan v{0} with +fortran and '\n '+mpi; please disable one of these features '\n 'or upgrade versions.').format(self.version))\n if spec.satisfies('@:3.6'):\n zoltan_path = 'Zoltan_v{0}'.format(self.version)\n return zoltan_path\n return '.'\n\n @property\n def parallel(self):\n # NOTE: Earlier versions of Zoltan cannot be built in parallel\n # because they contain nested Makefile dependency bugs.\n return not self.spec.satisfies('@:3.6+fortran')\n\n def autoreconf(self, spec, prefix):\n autoreconf = which('autoreconf')\n with working_dir(self.configure_directory):\n autoreconf('-ivf')\n\n def configure_args(self):\n spec = self.spec\n\n config_args = [\n self.get_config_flag('f90interface', 'fortran'),\n self.get_config_flag('mpi', 'mpi'),\n ]\n config_cflags = [\n '-O0' if '+debug' in spec else '-O3',\n '-g' if '+debug' in spec else '',\n ]\n\n config_ldflags = []\n # PGI runtime libraries\n if '%pgi' in spec:\n config_ldflags.append('-pgf90libs')\n if '+shared' in spec:\n config_args.extend([\n 'RANLIB=echo',\n '--with-ar=$(CXX) -shared $(LDFLAGS) -o'\n ])\n 
config_cflags.append(self.compiler.cc_pic_flag)\n if spec.satisfies('%gcc'):\n config_args.append('--with-libs=-lgfortran')\n if spec.satisfies('%intel'):\n config_args.append('--with-libs=-lifcore')\n\n if '+int64' in spec:\n config_args.append('--with-id-type=ulong')\n\n if '+parmetis' in spec:\n parmetis_prefix = spec['parmetis'].prefix\n config_args.extend([\n '--with-parmetis',\n '--with-parmetis-libdir={0}'.format(parmetis_prefix.lib),\n '--with-parmetis-incdir={0}'.format(parmetis_prefix.include),\n '--with-incdirs=-I{0}'.format(spec['metis'].prefix.include),\n '--with-ldflags=-L{0}'.format(spec['metis'].prefix.lib)\n ])\n if '+int64' in spec['metis']:\n config_args.append('--with-id-type=ulong')\n else:\n config_args.append('--with-id-type=uint')\n\n if '+mpi' in spec:\n config_args.extend([\n 'CC={0}'.format(spec['mpi'].mpicc),\n 'CXX={0}'.format(spec['mpi'].mpicxx),\n 'FC={0}'.format(spec['mpi'].mpifc),\n '--with-mpi={0}'.format(spec['mpi'].prefix),\n\n # NOTE: Zoltan assumes that it's linking against an MPI library\n # that can be found with '-lmpi' which isn't the case for many\n # MPI packages. We rely on the MPI-wrappers to automatically\n # add what is required for linking and thus pass an empty\n # list of libs\n '--with-mpi-libs= '\n ])\n\n config_fcflags = config_cflags[:]\n if spec.satisfies('%gcc@10:+fortran'):\n config_fcflags.append('-fallow-argument-mismatch')\n # NOTE: Early versions of Zoltan come packaged with a few embedded\n # library packages (e.g. ParMETIS, Scotch), which messes with Spack's\n # ability to descend directly into the package's source directory.\n config_args.extend([\n '--with-cflags={0}'.format(' '.join(config_cflags)),\n '--with-cxxflags={0}'.format(' '.join(config_cflags)),\n '--with-fcflags={0}'.format(' '.join(config_fcflags)),\n '--with-ldflags={0}'.format(' '.join(config_ldflags))\n ])\n return config_args\n\n # NOTE: Unfortunately, Zoltan doesn't provide any configuration\n # options for the extension of the output library files, so this\n # script must change these extensions as a post-processing step.\n @run_after('install')\n def solib_install(self):\n if '+shared' in self.spec:\n for lib_path in find(self.spec.prefix.lib, 'lib*.a'):\n lib_shared_name = re.sub(r'\\.a$', '.{0}'.format(dso_suffix),\n lib_path)\n move(lib_path, lib_shared_name)\n\n def get_config_flag(self, flag_name, flag_variant):\n flag_pre = 'en' if '+{0}'.format(flag_variant) in self.spec else 'dis'\n return '--{0}able-{1}'.format(flag_pre, flag_name)\n",
"path": "var/spack/repos/builtin/packages/zoltan/package.py"
}
] | [
{
"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack import *\nimport re\n\n\nclass Zoltan(AutotoolsPackage):\n \"\"\"The Zoltan library is a toolkit of parallel combinatorial algorithms\n for parallel, unstructured, and/or adaptive scientific\n applications. Zoltan's largest component is a suite of dynamic\n load-balancing and partitioning algorithms that increase\n applications' parallel performance by reducing idle time. Zoltan\n also has graph coloring and graph ordering algorithms, which are\n useful in task schedulers and parallel preconditioners.\n\n \"\"\"\n\n homepage = \"http://www.cs.sandia.gov/zoltan\"\n url = \"http://www.cs.sandia.gov/~kddevin/Zoltan_Distributions/zoltan_distrib_v3.83.tar.gz\"\n\n version('3.83', sha256='d0d78fdeab7a385c87d3666b8a8dc748994ff04d3fd846872a4845e12d79c1bb')\n version('3.8', sha256='5bdd46548fb9c73b225bbcf3d206c558c318cb292f0b19645e536315d14aafb7')\n version('3.6', sha256='d2cb41e5fb72ca564b24bc5f21d82d9f7992f2c977bc82b243a01a8a8ee4eb9c')\n version('3.3', sha256='8a90585674ab1bbd011dab29f778b9816519712c78d0aab4cdde9c68f02b30dc')\n\n patch('notparallel.patch', when='@3.8')\n\n variant('debug', default=False, description='Builds a debug version of the library.')\n variant('shared', default=True, description='Builds a shared version of the library.')\n\n variant('fortran', default=True, description='Enable Fortran support.')\n variant('mpi', default=True, description='Enable MPI support.')\n variant('parmetis', default=False, description='Enable ParMETIS support.')\n variant('int64', default=False, description='Enable 64bit indices.')\n\n depends_on('mpi', when='+mpi')\n\n depends_on('parmetis@4: +int64', when='+parmetis+int64')\n depends_on('parmetis@4:', when='+parmetis')\n depends_on('metis+int64', when='+parmetis+int64')\n depends_on('metis', when='+parmetis')\n\n depends_on('perl@:5.21', type='build', when='@:3.6')\n depends_on('autoconf', type='build')\n depends_on('automake', type='build')\n depends_on('m4', type='build')\n\n conflicts('+parmetis', when='~mpi')\n\n build_directory = 'spack-build'\n\n @property\n def configure_directory(self):\n spec = self.spec\n\n # FIXME: The older Zoltan versions fail to compile the F90 MPI wrappers\n # because of some complicated generic type problem.\n if spec.satisfies('@:3.6+fortran+mpi'):\n raise RuntimeError(('Cannot build Zoltan v{0} with +fortran and '\n '+mpi; please disable one of these features '\n 'or upgrade versions.').format(self.version))\n if spec.satisfies('@:3.6'):\n zoltan_path = 'Zoltan_v{0}'.format(self.version)\n return zoltan_path\n return '.'\n\n @property\n def parallel(self):\n # NOTE: Earlier versions of Zoltan cannot be built in parallel\n # because they contain nested Makefile dependency bugs.\n return not self.spec.satisfies('@:3.6+fortran')\n\n def autoreconf(self, spec, prefix):\n autoreconf = which('autoreconf')\n with working_dir(self.configure_directory):\n autoreconf('-ivf')\n\n def configure_args(self):\n spec = self.spec\n\n config_args = [\n self.get_config_flag('f90interface', 'fortran'),\n self.get_config_flag('mpi', 'mpi'),\n ]\n config_cflags = [\n '-O0' if '+debug' in spec else '-O3',\n '-g' if '+debug' in spec else '',\n ]\n\n config_ldflags = []\n # PGI runtime libraries\n if '%pgi' in spec:\n config_ldflags.append('-pgf90libs')\n if '+shared' in spec:\n config_args.extend([\n 
'RANLIB=echo',\n '--with-ar=$(CXX) -shared $(LDFLAGS) -o'\n ])\n config_cflags.append(self.compiler.cc_pic_flag)\n if spec.satisfies('%gcc'):\n config_args.append('--with-libs=-lgfortran')\n if spec.satisfies('%intel'):\n config_args.append('--with-libs=-lifcore')\n\n if '+int64' in spec:\n config_args.append('--with-id-type=ulong')\n\n if '+parmetis' in spec:\n parmetis_prefix = spec['parmetis'].prefix\n config_args.extend([\n '--with-parmetis',\n '--with-parmetis-libdir={0}'.format(parmetis_prefix.lib),\n '--with-parmetis-incdir={0}'.format(parmetis_prefix.include),\n '--with-incdirs=-I{0}'.format(spec['metis'].prefix.include),\n '--with-ldflags=-L{0}'.format(spec['metis'].prefix.lib)\n ])\n if '+int64' in spec['metis']:\n config_args.append('--with-id-type=ulong')\n else:\n config_args.append('--with-id-type=uint')\n\n if '+mpi' in spec:\n config_args.extend([\n 'CC={0}'.format(spec['mpi'].mpicc),\n 'CXX={0}'.format(spec['mpi'].mpicxx),\n 'FC={0}'.format(spec['mpi'].mpifc),\n '--with-mpi={0}'.format(spec['mpi'].prefix),\n\n # NOTE: Zoltan assumes that it's linking against an MPI library\n # that can be found with '-lmpi' which isn't the case for many\n # MPI packages. We rely on the MPI-wrappers to automatically\n # add what is required for linking and thus pass an empty\n # list of libs\n '--with-mpi-libs= '\n ])\n\n config_fcflags = config_cflags[:]\n if spec.satisfies('%gcc@10:+fortran'):\n config_fcflags.append('-fallow-argument-mismatch')\n # NOTE: Early versions of Zoltan come packaged with a few embedded\n # library packages (e.g. ParMETIS, Scotch), which messes with Spack's\n # ability to descend directly into the package's source directory.\n config_args.extend([\n '--with-cflags={0}'.format(' '.join(config_cflags)),\n '--with-cxxflags={0}'.format(' '.join(config_cflags)),\n '--with-fcflags={0}'.format(' '.join(config_fcflags)),\n '--with-ldflags={0}'.format(' '.join(config_ldflags))\n ])\n return config_args\n\n # NOTE: Unfortunately, Zoltan doesn't provide any configuration\n # options for the extension of the output library files, so this\n # script must change these extensions as a post-processing step.\n @run_after('install')\n def solib_install(self):\n if '+shared' in self.spec:\n for lib_path in find(self.spec.prefix.lib, 'lib*.a'):\n lib_shared_name = re.sub(r'\\.a$', '.{0}'.format(dso_suffix),\n lib_path)\n move(lib_path, lib_shared_name)\n\n def get_config_flag(self, flag_name, flag_variant):\n flag_pre = 'en' if '+{0}'.format(flag_variant) in self.spec else 'dis'\n return '--{0}able-{1}'.format(flag_pre, flag_name)\n",
"path": "var/spack/repos/builtin/packages/zoltan/package.py"
}
] | diff --git a/var/spack/repos/builtin/packages/zoltan/package.py b/var/spack/repos/builtin/packages/zoltan/package.py
index 769ca81be67e4d..e96579d6c3b3cc 100644
--- a/var/spack/repos/builtin/packages/zoltan/package.py
+++ b/var/spack/repos/builtin/packages/zoltan/package.py
@@ -39,6 +39,7 @@ class Zoltan(AutotoolsPackage):
depends_on('mpi', when='+mpi')
+ depends_on('parmetis@4: +int64', when='+parmetis+int64')
depends_on('parmetis@4:', when='+parmetis')
depends_on('metis+int64', when='+parmetis+int64')
depends_on('metis', when='+parmetis')
|
Mailu__Mailu-2116 | Error 404 not found when opening admin after upgrade 1.8 to master
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
### Versions
Before upgrade: Docker 1.8 images.
After upgrade: Docker master images (pulled 30 December 2021).
## Description
**Mailu 1.8** image redirects `/admin` to `/admin/ui`.
**Mailu master** image no longer redirects `/admin/ui`, as the `ui` part of the URL has been removed according to [towncrier newsfragment 1929.enhancement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):
> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui
After the upgrade from `1.8` to `master` and visiting the admin page, the browser still uses the cached URL `/admin/ui`, which results in a 404 Not Found.
## Replication Steps
1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.
2. Make sure the Admin page works.
3. Remove docker containers (`docker-compose down`).
4. Recreate **all** containers at the same time using `mailu master Docker images`.
5. Open root mail domain. The browser uses the cached URL `admin/ui` and shows Error 404 not found.
Note: Tested with `TLS_FLAVOR=letsencrypt`, admin, roundcube, and Firefox.
## Expected behaviour
Backwards compatibility after the Mailu 1.8 upgrade, without the need to clear browser caches.
## Front log
```
front_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] "GET /admin/ui/ HTTP/2.0" 404 198 "https://mail.mydomain.nl/sso/login" "Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0"
```
## Bugfix
The proposal is to always redirect `/admin/ui` to `/admin` to prevent browser caching problems after the upgrade.
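A minimal sketch of such a redirect inside the admin's Flask UI blueprint (it assumes the existing `ui` blueprint and its `.index` view):
```python
# Sketch of a backwards-compatibility redirect; assumes the mailu.ui blueprint
# `ui` and its existing index view.
@ui.route('/ui/')
def redirect_old_path():
    # 301 so browsers replace the cached /admin/ui URL with /admin
    return flask.redirect(flask.url_for('.index'), code=301)
```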
| [
{
"content": "from mailu import models, utils\nfrom mailu.ui import ui, forms, access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\n\n\[email protected]('/', methods=[\"GET\"])\[email protected]\ndef index():\n return flask.redirect(flask.url_for('.user_settings'))\n\[email protected]('/announcement', methods=['GET', 'POST'])\[email protected]_admin\ndef announcement():\n form = forms.AnnouncementForm()\n if form.validate_on_submit():\n for user in models.User.query.all():\n user.sendmail(form.announcement_subject.data,\n form.announcement_body.data)\n # Force-empty the form\n form.announcement_subject.data = ''\n form.announcement_body.data = ''\n flask.flash('Your announcement was sent', 'success')\n return flask.render_template('announcement.html', form=form)\n\[email protected]('/webmail', methods=['GET'])\ndef webmail():\n return flask.redirect(app.config['WEB_WEBMAIL'])\n\[email protected]('/client', methods=['GET'])\ndef client():\n return flask.render_template('client.html')\n\[email protected]('/webui_antispam', methods=['GET'])\ndef antispam():\n return flask.render_template('antispam.html')\n",
"path": "core/admin/mailu/ui/views/base.py"
}
] | [
{
"content": "from mailu import models, utils\nfrom mailu.ui import ui, forms, access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\n\n\[email protected]('/', methods=[\"GET\"])\[email protected]\ndef index():\n return flask.redirect(flask.url_for('.user_settings'))\n\[email protected]('/ui/')\ndef redirect_old_path():\n return flask.redirect(flask.url_for('.index'), code=301)\n\[email protected]('/announcement', methods=['GET', 'POST'])\[email protected]_admin\ndef announcement():\n form = forms.AnnouncementForm()\n if form.validate_on_submit():\n for user in models.User.query.all():\n user.sendmail(form.announcement_subject.data,\n form.announcement_body.data)\n # Force-empty the form\n form.announcement_subject.data = ''\n form.announcement_body.data = ''\n flask.flash('Your announcement was sent', 'success')\n return flask.render_template('announcement.html', form=form)\n\[email protected]('/webmail', methods=['GET'])\ndef webmail():\n return flask.redirect(app.config['WEB_WEBMAIL'])\n\[email protected]('/client', methods=['GET'])\ndef client():\n return flask.render_template('client.html')\n\[email protected]('/webui_antispam', methods=['GET'])\ndef antispam():\n return flask.render_template('antispam.html')\n",
"path": "core/admin/mailu/ui/views/base.py"
}
] | diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
index 01e168a1e..9b7614e1d 100644
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -11,6 +11,10 @@
def index():
return flask.redirect(flask.url_for('.user_settings'))
[email protected]('/ui/')
+def redirect_old_path():
+ return flask.redirect(flask.url_for('.index'), code=301)
+
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
|
numba__numba-4034 | Make type.Optional string representation more friendly
<!--
Thanks for opening an issue! To help the Numba team handle your information
efficiently, please first ensure that there is no other issue present that
already describes the issue you have
(search at https://github.com/numba/numba/issues?&q=is%3Aissue).
For more general "how do I do X?" type questions, please speak to us in real
time on https://gitter.im/numba/numba or post to the Numba mailing list
https://groups.google.com/a/continuum.io/forum/#!forum/numba-users.
-->
## Feature request
For example, for `types.Optional(types.float64)` the current `str()` is `?float64`, which is fine, but it may be more helpful for users to produce something like:
`OptionalType(float64) i.e. type 'float64 or None'`
such that an error message might read e.g.:
```
Dict.value_type cannot be of type OptionalType(float64) i.e. type 'float64 or None'
```
instead of:
```
Dict.value_type cannot be of type ?float64
```
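A standalone illustration of the suggested wording (the helper name is made up purely for illustration; the real change would live wherever `Optional` builds its display name):
```python
# Hypothetical helper, only to illustrate the proposed string; not numba API.
def friendly_optional_name(inner):
    return "OptionalType(%s) i.e. type '%s or None'" % (inner, inner)

print(friendly_optional_name("float64"))
# -> OptionalType(float64) i.e. type 'float64 or None'
```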
<!--
Please include details of the feature you would like to see, why you would
like to see it/the use case
-->
| [
{
"content": "from __future__ import print_function, division, absolute_import\n\nfrom .abstract import *\nfrom .common import *\nfrom ..typeconv import Conversion\nfrom ..errors import TypingError, LiteralTypingError\n\n\n\nclass PyObject(Dummy):\n \"\"\"\n A generic CPython object.\n \"\"\"\n\n def is_precise(self):\n return False\n\n\nclass Phantom(Dummy):\n \"\"\"\n A type that cannot be materialized. A Phantom cannot be used as\n argument or return type.\n \"\"\"\n\n\nclass Undefined(Dummy):\n \"\"\"\n A type that is left imprecise. This is used as a temporaray placeholder\n during type inference in the hope that the type can be later refined.\n \"\"\"\n\n def is_precise(self):\n return False\n\n\nclass RawPointer(Opaque):\n \"\"\"\n A raw pointer without any specific meaning.\n \"\"\"\n\n\nclass StringLiteral(Literal, Dummy):\n pass\n\n\nLiteral.ctor_map[str] = StringLiteral\n\n\ndef unliteral(lit_type):\n \"\"\"\n Get base type from Literal type.\n \"\"\"\n if hasattr(lit_type, '__unliteral__'):\n return lit_type.__unliteral__()\n return getattr(lit_type, 'literal_type', lit_type)\n\n\ndef literal(value):\n \"\"\"Returns a Literal instance or raise LiteralTypingError\n \"\"\"\n assert not isinstance(value, Literal)\n ty = type(value)\n try:\n ctor = Literal.ctor_map[ty]\n except KeyError:\n raise LiteralTypingError(ty)\n else:\n return ctor(value)\n\n\ndef maybe_literal(value):\n \"\"\"Get a Literal type for the value or None.\n \"\"\"\n try:\n return literal(value)\n except LiteralTypingError:\n return\n\n\nclass Omitted(Opaque):\n \"\"\"\n An omitted function argument with a default value.\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n super(Omitted, self).__init__(\"omitted(default=%r)\" % (value,))\n\n @property\n def key(self):\n return type(self.value), id(self.value)\n\n\nclass VarArg(Type):\n \"\"\"\n Special type representing a variable number of arguments at the\n end of a function's signature. Only used for signature matching,\n not for actual values.\n \"\"\"\n\n def __init__(self, dtype):\n self.dtype = dtype\n super(VarArg, self).__init__(\"*%s\" % dtype)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass Module(Dummy):\n def __init__(self, pymod):\n self.pymod = pymod\n super(Module, self).__init__(\"Module(%s)\" % pymod)\n\n @property\n def key(self):\n return self.pymod\n\n\nclass Macro(Type):\n def __init__(self, template):\n self.template = template\n cls = type(self)\n super(Macro, self).__init__(\"%s(%s)\" % (cls.__name__, template))\n\n @property\n def key(self):\n return self.template\n\n\nclass MemInfoPointer(Type):\n \"\"\"\n Pointer to a Numba \"meminfo\" (i.e. the information for a managed\n piece of memory).\n \"\"\"\n mutable = True\n\n def __init__(self, dtype):\n self.dtype = dtype\n name = \"memory-managed *%s\" % dtype\n super(MemInfoPointer, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass CPointer(Type):\n \"\"\"\n Type class for pointers to other types.\n \"\"\"\n mutable = True\n\n def __init__(self, dtype):\n self.dtype = dtype\n name = \"%s*\" % dtype\n super(CPointer, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass EphemeralPointer(CPointer):\n \"\"\"\n Type class for pointers which aren't guaranteed to last long - e.g.\n stack-allocated slots. 
The data model serializes such pointers\n by copying the data pointed to.\n \"\"\"\n\n\nclass EphemeralArray(Type):\n \"\"\"\n Similar to EphemeralPointer, but pointing to an array of elements,\n rather than a single one. The array size must be known at compile-time.\n \"\"\"\n\n def __init__(self, dtype, count):\n self.dtype = dtype\n self.count = count\n name = \"*%s[%d]\" % (dtype, count)\n super(EphemeralArray, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype, self.count\n\n\nclass Object(Type):\n # XXX unused?\n mutable = True\n\n def __init__(self, clsobj):\n self.cls = clsobj\n name = \"Object(%s)\" % clsobj.__name__\n super(Object, self).__init__(name)\n\n @property\n def key(self):\n return self.cls\n\n\nclass Optional(Type):\n \"\"\"\n Type class for optional types, i.e. union { some type, None }\n \"\"\"\n\n def __init__(self, typ):\n assert not isinstance(typ, (Optional, NoneType))\n typ = unliteral(typ)\n self.type = typ\n name = \"?%s\" % typ\n super(Optional, self).__init__(name)\n\n @property\n def key(self):\n return self.type\n\n def can_convert_to(self, typingctx, other):\n if isinstance(other, Optional):\n return typingctx.can_convert(self.type, other.type)\n else:\n conv = typingctx.can_convert(self.type, other)\n if conv is not None:\n return max(conv, Conversion.safe)\n\n def can_convert_from(self, typingctx, other):\n if isinstance(other, NoneType):\n return Conversion.promote\n elif isinstance(other, Optional):\n return typingctx.can_convert(other.type, self.type)\n else:\n conv = typingctx.can_convert(other, self.type)\n if conv is not None:\n return max(conv, Conversion.promote)\n\n def unify(self, typingctx, other):\n if isinstance(other, Optional):\n unified = typingctx.unify_pairs(self.type, other.type)\n else:\n unified = typingctx.unify_pairs(self.type, other)\n\n if unified is not None:\n if isinstance(unified, Optional):\n return unified\n else:\n return Optional(unified)\n\n\nclass NoneType(Opaque):\n \"\"\"\n The type for None.\n \"\"\"\n\n def unify(self, typingctx, other):\n \"\"\"\n Turn anything to a Optional type;\n \"\"\"\n if isinstance(other, (Optional, NoneType)):\n return other\n return Optional(other)\n\n\nclass EllipsisType(Opaque):\n \"\"\"\n The type for the Ellipsis singleton.\n \"\"\"\n\n\nclass ExceptionClass(Callable, Phantom):\n \"\"\"\n The type of exception classes (not instances).\n \"\"\"\n\n def __init__(self, exc_class):\n assert issubclass(exc_class, BaseException)\n name = \"%s\" % (exc_class.__name__)\n self.exc_class = exc_class\n super(ExceptionClass, self).__init__(name)\n\n def get_call_type(self, context, args, kws):\n return self.get_call_signatures()[0][0]\n\n def get_call_signatures(self):\n from .. import typing\n return_type = ExceptionInstance(self.exc_class)\n return [typing.signature(return_type)], False\n\n @property\n def key(self):\n return self.exc_class\n\n\nclass ExceptionInstance(Phantom):\n \"\"\"\n The type of exception instances. 
*exc_class* should be the\n exception class.\n \"\"\"\n\n def __init__(self, exc_class):\n assert issubclass(exc_class, BaseException)\n name = \"%s(...)\" % (exc_class.__name__,)\n self.exc_class = exc_class\n super(ExceptionInstance, self).__init__(name)\n\n @property\n def key(self):\n return self.exc_class\n\n\nclass SliceType(Type):\n\n def __init__(self, name, members):\n assert members in (2, 3)\n self.members = members\n self.has_step = members >= 3\n super(SliceType, self).__init__(name)\n\n @property\n def key(self):\n return self.members\n\n\nclass SliceLiteral(Literal, SliceType):\n def __init__(self, value):\n self._literal_init(value)\n name = 'Literal[slice]({})'.format(value)\n members = 2 if value.step is None else 3\n SliceType.__init__(self, name=name, members=members)\n\n\nLiteral.ctor_map[slice] = SliceLiteral\n\n\nclass ClassInstanceType(Type):\n \"\"\"\n The type of a jitted class *instance*. It will be the return-type\n of the constructor of the class.\n \"\"\"\n mutable = True\n name_prefix = \"instance\"\n\n def __init__(self, class_type):\n self.class_type = class_type\n name = \"{0}.{1}\".format(self.name_prefix, self.class_type.name)\n super(ClassInstanceType, self).__init__(name)\n\n def get_data_type(self):\n return ClassDataType(self)\n\n def get_reference_type(self):\n return self\n\n @property\n def key(self):\n return self.class_type.key\n\n @property\n def classname(self):\n return self.class_type.class_def.__name__\n\n @property\n def jitprops(self):\n return self.class_type.jitprops\n\n @property\n def jitmethods(self):\n return self.class_type.jitmethods\n\n @property\n def struct(self):\n return self.class_type.struct\n\n @property\n def methods(self):\n return self.class_type.methods\n\n\nclass ClassType(Callable, Opaque):\n \"\"\"\n The type of the jitted class (not instance). When the type of a class\n is called, its constructor is invoked.\n \"\"\"\n mutable = True\n name_prefix = \"jitclass\"\n instance_type_class = ClassInstanceType\n\n def __init__(self, class_def, ctor_template_cls, struct, jitmethods,\n jitprops):\n self.class_def = class_def\n self.ctor_template = self._specialize_template(ctor_template_cls)\n self.jitmethods = jitmethods\n self.jitprops = jitprops\n self.struct = struct\n self.methods = dict((k, v.py_func) for k, v in self.jitmethods.items())\n fielddesc = ','.join(\"{0}:{1}\".format(k, v) for k, v in struct.items())\n name = \"{0}.{1}#{2:x}<{3}>\".format(self.name_prefix, class_def.__name__,\n id(class_def), fielddesc)\n super(ClassType, self).__init__(name)\n self.instance_type = self.instance_type_class(self)\n\n def get_call_type(self, context, args, kws):\n return self.ctor_template(context).apply(args, kws)\n\n def get_call_signatures(self):\n return (), True\n\n def _specialize_template(self, basecls):\n return type(basecls.__name__, (basecls,), dict(key=self))\n\n\nclass DeferredType(Type):\n \"\"\"\n Represents a type that will be defined later. It must be defined\n before it is materialized (used in the compiler). 
Once defined, it\n behaves exactly as the type it is defining.\n \"\"\"\n def __init__(self):\n self._define = None\n name = \"{0}#{1}\".format(type(self).__name__, id(self))\n super(DeferredType, self).__init__(name)\n\n def get(self):\n if self._define is None:\n raise RuntimeError(\"deferred type not defined\")\n return self._define\n\n def define(self, typ):\n if self._define is not None:\n raise TypeError(\"deferred type already defined\")\n if not isinstance(typ, Type):\n raise TypeError(\"arg is not a Type; got: {0}\".format(type(typ)))\n self._define = typ\n\n def unify(self, typingctx, other):\n return typingctx.unify_pairs(self.get(), other)\n\n\nclass ClassDataType(Type):\n \"\"\"\n Internal only.\n Represents the data of the instance. The representation of\n ClassInstanceType contains a pointer to a ClassDataType which represents\n a C structure that contains all the data fields of the class instance.\n \"\"\"\n def __init__(self, classtyp):\n self.class_type = classtyp\n name = \"data.{0}\".format(self.class_type.name)\n super(ClassDataType, self).__init__(name)\n\n\nclass ContextManager(Callable, Phantom):\n \"\"\"\n An overly-simple ContextManager type that cannot be materialized.\n \"\"\"\n def __init__(self, cm):\n self.cm = cm\n super(ContextManager, self).__init__(\"ContextManager({})\".format(cm))\n\n def get_call_signatures(self):\n if not self.cm.is_callable:\n msg = \"contextmanager {} is not callable\".format(self.cm)\n raise TypingError(msg)\n\n return (), False\n\n def get_call_type(self, context, args, kws):\n from numba import typing\n\n if not self.cm.is_callable:\n msg = \"contextmanager {} is not callable\".format(self.cm)\n raise TypingError(msg)\n\n posargs = list(args) + [v for k, v in sorted(kws.items())]\n return typing.signature(self, *posargs)\n\n\nclass UnicodeType(IterableType):\n\n def __init__(self, name):\n super(UnicodeType, self).__init__(name)\n\n @property\n def iterator_type(self):\n return UnicodeIteratorType(self)\n\n\nclass UnicodeIteratorType(SimpleIteratorType):\n\n def __init__(self, dtype):\n name = \"iter_unicode\"\n self.data = dtype\n super(UnicodeIteratorType, self).__init__(name, dtype)\n",
"path": "numba/types/misc.py"
}
] | [
{
"content": "from __future__ import print_function, division, absolute_import\n\nfrom .abstract import *\nfrom .common import *\nfrom ..typeconv import Conversion\nfrom ..errors import TypingError, LiteralTypingError\n\n\n\nclass PyObject(Dummy):\n \"\"\"\n A generic CPython object.\n \"\"\"\n\n def is_precise(self):\n return False\n\n\nclass Phantom(Dummy):\n \"\"\"\n A type that cannot be materialized. A Phantom cannot be used as\n argument or return type.\n \"\"\"\n\n\nclass Undefined(Dummy):\n \"\"\"\n A type that is left imprecise. This is used as a temporaray placeholder\n during type inference in the hope that the type can be later refined.\n \"\"\"\n\n def is_precise(self):\n return False\n\n\nclass RawPointer(Opaque):\n \"\"\"\n A raw pointer without any specific meaning.\n \"\"\"\n\n\nclass StringLiteral(Literal, Dummy):\n pass\n\n\nLiteral.ctor_map[str] = StringLiteral\n\n\ndef unliteral(lit_type):\n \"\"\"\n Get base type from Literal type.\n \"\"\"\n if hasattr(lit_type, '__unliteral__'):\n return lit_type.__unliteral__()\n return getattr(lit_type, 'literal_type', lit_type)\n\n\ndef literal(value):\n \"\"\"Returns a Literal instance or raise LiteralTypingError\n \"\"\"\n assert not isinstance(value, Literal)\n ty = type(value)\n try:\n ctor = Literal.ctor_map[ty]\n except KeyError:\n raise LiteralTypingError(ty)\n else:\n return ctor(value)\n\n\ndef maybe_literal(value):\n \"\"\"Get a Literal type for the value or None.\n \"\"\"\n try:\n return literal(value)\n except LiteralTypingError:\n return\n\n\nclass Omitted(Opaque):\n \"\"\"\n An omitted function argument with a default value.\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n super(Omitted, self).__init__(\"omitted(default=%r)\" % (value,))\n\n @property\n def key(self):\n return type(self.value), id(self.value)\n\n\nclass VarArg(Type):\n \"\"\"\n Special type representing a variable number of arguments at the\n end of a function's signature. Only used for signature matching,\n not for actual values.\n \"\"\"\n\n def __init__(self, dtype):\n self.dtype = dtype\n super(VarArg, self).__init__(\"*%s\" % dtype)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass Module(Dummy):\n def __init__(self, pymod):\n self.pymod = pymod\n super(Module, self).__init__(\"Module(%s)\" % pymod)\n\n @property\n def key(self):\n return self.pymod\n\n\nclass Macro(Type):\n def __init__(self, template):\n self.template = template\n cls = type(self)\n super(Macro, self).__init__(\"%s(%s)\" % (cls.__name__, template))\n\n @property\n def key(self):\n return self.template\n\n\nclass MemInfoPointer(Type):\n \"\"\"\n Pointer to a Numba \"meminfo\" (i.e. the information for a managed\n piece of memory).\n \"\"\"\n mutable = True\n\n def __init__(self, dtype):\n self.dtype = dtype\n name = \"memory-managed *%s\" % dtype\n super(MemInfoPointer, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass CPointer(Type):\n \"\"\"\n Type class for pointers to other types.\n \"\"\"\n mutable = True\n\n def __init__(self, dtype):\n self.dtype = dtype\n name = \"%s*\" % dtype\n super(CPointer, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype\n\n\nclass EphemeralPointer(CPointer):\n \"\"\"\n Type class for pointers which aren't guaranteed to last long - e.g.\n stack-allocated slots. 
The data model serializes such pointers\n by copying the data pointed to.\n \"\"\"\n\n\nclass EphemeralArray(Type):\n \"\"\"\n Similar to EphemeralPointer, but pointing to an array of elements,\n rather than a single one. The array size must be known at compile-time.\n \"\"\"\n\n def __init__(self, dtype, count):\n self.dtype = dtype\n self.count = count\n name = \"*%s[%d]\" % (dtype, count)\n super(EphemeralArray, self).__init__(name)\n\n @property\n def key(self):\n return self.dtype, self.count\n\n\nclass Object(Type):\n # XXX unused?\n mutable = True\n\n def __init__(self, clsobj):\n self.cls = clsobj\n name = \"Object(%s)\" % clsobj.__name__\n super(Object, self).__init__(name)\n\n @property\n def key(self):\n return self.cls\n\n\nclass Optional(Type):\n \"\"\"\n Type class for optional types, i.e. union { some type, None }\n \"\"\"\n\n def __init__(self, typ):\n assert not isinstance(typ, (Optional, NoneType))\n typ = unliteral(typ)\n self.type = typ\n name = \"OptionalType(%s) i.e. the type '%s or None'\" % (typ, typ)\n super(Optional, self).__init__(name)\n\n @property\n def key(self):\n return self.type\n\n def can_convert_to(self, typingctx, other):\n if isinstance(other, Optional):\n return typingctx.can_convert(self.type, other.type)\n else:\n conv = typingctx.can_convert(self.type, other)\n if conv is not None:\n return max(conv, Conversion.safe)\n\n def can_convert_from(self, typingctx, other):\n if isinstance(other, NoneType):\n return Conversion.promote\n elif isinstance(other, Optional):\n return typingctx.can_convert(other.type, self.type)\n else:\n conv = typingctx.can_convert(other, self.type)\n if conv is not None:\n return max(conv, Conversion.promote)\n\n def unify(self, typingctx, other):\n if isinstance(other, Optional):\n unified = typingctx.unify_pairs(self.type, other.type)\n else:\n unified = typingctx.unify_pairs(self.type, other)\n\n if unified is not None:\n if isinstance(unified, Optional):\n return unified\n else:\n return Optional(unified)\n\n\nclass NoneType(Opaque):\n \"\"\"\n The type for None.\n \"\"\"\n\n def unify(self, typingctx, other):\n \"\"\"\n Turn anything to a Optional type;\n \"\"\"\n if isinstance(other, (Optional, NoneType)):\n return other\n return Optional(other)\n\n\nclass EllipsisType(Opaque):\n \"\"\"\n The type for the Ellipsis singleton.\n \"\"\"\n\n\nclass ExceptionClass(Callable, Phantom):\n \"\"\"\n The type of exception classes (not instances).\n \"\"\"\n\n def __init__(self, exc_class):\n assert issubclass(exc_class, BaseException)\n name = \"%s\" % (exc_class.__name__)\n self.exc_class = exc_class\n super(ExceptionClass, self).__init__(name)\n\n def get_call_type(self, context, args, kws):\n return self.get_call_signatures()[0][0]\n\n def get_call_signatures(self):\n from .. import typing\n return_type = ExceptionInstance(self.exc_class)\n return [typing.signature(return_type)], False\n\n @property\n def key(self):\n return self.exc_class\n\n\nclass ExceptionInstance(Phantom):\n \"\"\"\n The type of exception instances. 
*exc_class* should be the\n exception class.\n \"\"\"\n\n def __init__(self, exc_class):\n assert issubclass(exc_class, BaseException)\n name = \"%s(...)\" % (exc_class.__name__,)\n self.exc_class = exc_class\n super(ExceptionInstance, self).__init__(name)\n\n @property\n def key(self):\n return self.exc_class\n\n\nclass SliceType(Type):\n\n def __init__(self, name, members):\n assert members in (2, 3)\n self.members = members\n self.has_step = members >= 3\n super(SliceType, self).__init__(name)\n\n @property\n def key(self):\n return self.members\n\n\nclass SliceLiteral(Literal, SliceType):\n def __init__(self, value):\n self._literal_init(value)\n name = 'Literal[slice]({})'.format(value)\n members = 2 if value.step is None else 3\n SliceType.__init__(self, name=name, members=members)\n\n\nLiteral.ctor_map[slice] = SliceLiteral\n\n\nclass ClassInstanceType(Type):\n \"\"\"\n The type of a jitted class *instance*. It will be the return-type\n of the constructor of the class.\n \"\"\"\n mutable = True\n name_prefix = \"instance\"\n\n def __init__(self, class_type):\n self.class_type = class_type\n name = \"{0}.{1}\".format(self.name_prefix, self.class_type.name)\n super(ClassInstanceType, self).__init__(name)\n\n def get_data_type(self):\n return ClassDataType(self)\n\n def get_reference_type(self):\n return self\n\n @property\n def key(self):\n return self.class_type.key\n\n @property\n def classname(self):\n return self.class_type.class_def.__name__\n\n @property\n def jitprops(self):\n return self.class_type.jitprops\n\n @property\n def jitmethods(self):\n return self.class_type.jitmethods\n\n @property\n def struct(self):\n return self.class_type.struct\n\n @property\n def methods(self):\n return self.class_type.methods\n\n\nclass ClassType(Callable, Opaque):\n \"\"\"\n The type of the jitted class (not instance). When the type of a class\n is called, its constructor is invoked.\n \"\"\"\n mutable = True\n name_prefix = \"jitclass\"\n instance_type_class = ClassInstanceType\n\n def __init__(self, class_def, ctor_template_cls, struct, jitmethods,\n jitprops):\n self.class_def = class_def\n self.ctor_template = self._specialize_template(ctor_template_cls)\n self.jitmethods = jitmethods\n self.jitprops = jitprops\n self.struct = struct\n self.methods = dict((k, v.py_func) for k, v in self.jitmethods.items())\n fielddesc = ','.join(\"{0}:{1}\".format(k, v) for k, v in struct.items())\n name = \"{0}.{1}#{2:x}<{3}>\".format(self.name_prefix, class_def.__name__,\n id(class_def), fielddesc)\n super(ClassType, self).__init__(name)\n self.instance_type = self.instance_type_class(self)\n\n def get_call_type(self, context, args, kws):\n return self.ctor_template(context).apply(args, kws)\n\n def get_call_signatures(self):\n return (), True\n\n def _specialize_template(self, basecls):\n return type(basecls.__name__, (basecls,), dict(key=self))\n\n\nclass DeferredType(Type):\n \"\"\"\n Represents a type that will be defined later. It must be defined\n before it is materialized (used in the compiler). 
Once defined, it\n behaves exactly as the type it is defining.\n \"\"\"\n def __init__(self):\n self._define = None\n name = \"{0}#{1}\".format(type(self).__name__, id(self))\n super(DeferredType, self).__init__(name)\n\n def get(self):\n if self._define is None:\n raise RuntimeError(\"deferred type not defined\")\n return self._define\n\n def define(self, typ):\n if self._define is not None:\n raise TypeError(\"deferred type already defined\")\n if not isinstance(typ, Type):\n raise TypeError(\"arg is not a Type; got: {0}\".format(type(typ)))\n self._define = typ\n\n def unify(self, typingctx, other):\n return typingctx.unify_pairs(self.get(), other)\n\n\nclass ClassDataType(Type):\n \"\"\"\n Internal only.\n Represents the data of the instance. The representation of\n ClassInstanceType contains a pointer to a ClassDataType which represents\n a C structure that contains all the data fields of the class instance.\n \"\"\"\n def __init__(self, classtyp):\n self.class_type = classtyp\n name = \"data.{0}\".format(self.class_type.name)\n super(ClassDataType, self).__init__(name)\n\n\nclass ContextManager(Callable, Phantom):\n \"\"\"\n An overly-simple ContextManager type that cannot be materialized.\n \"\"\"\n def __init__(self, cm):\n self.cm = cm\n super(ContextManager, self).__init__(\"ContextManager({})\".format(cm))\n\n def get_call_signatures(self):\n if not self.cm.is_callable:\n msg = \"contextmanager {} is not callable\".format(self.cm)\n raise TypingError(msg)\n\n return (), False\n\n def get_call_type(self, context, args, kws):\n from numba import typing\n\n if not self.cm.is_callable:\n msg = \"contextmanager {} is not callable\".format(self.cm)\n raise TypingError(msg)\n\n posargs = list(args) + [v for k, v in sorted(kws.items())]\n return typing.signature(self, *posargs)\n\n\nclass UnicodeType(IterableType):\n\n def __init__(self, name):\n super(UnicodeType, self).__init__(name)\n\n @property\n def iterator_type(self):\n return UnicodeIteratorType(self)\n\n\nclass UnicodeIteratorType(SimpleIteratorType):\n\n def __init__(self, dtype):\n name = \"iter_unicode\"\n self.data = dtype\n super(UnicodeIteratorType, self).__init__(name, dtype)\n",
"path": "numba/types/misc.py"
}
] | diff --git a/docs/source/glossary.rst b/docs/source/glossary.rst
index 9fe07808e4c..e9df79622b6 100644
--- a/docs/source/glossary.rst
+++ b/docs/source/glossary.rst
@@ -67,6 +67,15 @@ Glossary
no faster than Python interpreted code, unless the Numba compiler can
take advantage of :term:`loop-jitting`.
+ ``OptionalType``
+ An ``OptionalType`` is effectively a type union of a ``type`` and ``None``.
+ They typically occur in practice due to a variable being set to ``None``
+ and then in a branch the variable being set to some other value. It's
+ often not possible at compile time to determine if the branch will execute
+ so to permit :term:`type inference` to complete, the type of the variable
+ becomes the union of a ``type`` (from the value) and ``None``,
+ i.e. ``OptionalType(type)``.
+
type inference
The process by which Numba determines the specialized types of all
values within a function being compiled. Type inference can fail
diff --git a/numba/tests/test_dicts.py b/numba/tests/test_dicts.py
index e607ee5d08e..5be862d4c14 100644
--- a/numba/tests/test_dicts.py
+++ b/numba/tests/test_dicts.py
@@ -137,7 +137,7 @@ def foo(choice):
with self.assertRaises(TypingError) as raises:
foo(True)
self.assertIn(
- "Dict.value_type cannot be of type ?float64",
+ "Dict.value_type cannot be of type OptionalType(float64)",
str(raises.exception),
)
@@ -151,7 +151,7 @@ def foo(choice):
with self.assertRaises(TypingError) as raises:
foo(True)
self.assertIn(
- "Dict.key_type cannot be of type ?float64",
+ "Dict.key_type cannot be of type OptionalType(float64)",
str(raises.exception),
)
diff --git a/numba/types/misc.py b/numba/types/misc.py
index b96dded0c1d..9dc1af9dd1d 100644
--- a/numba/types/misc.py
+++ b/numba/types/misc.py
@@ -209,7 +209,7 @@ def __init__(self, typ):
assert not isinstance(typ, (Optional, NoneType))
typ = unliteral(typ)
self.type = typ
- name = "?%s" % typ
+ name = "OptionalType(%s) i.e. the type '%s or None'" % (typ, typ)
super(Optional, self).__init__(name)
@property
|
ray-project__ray-3621 | [modin] Importing Modin before Ray can sometimes cause ImportError
### Describe the problem
<!-- Describe the problem clearly here. -->
When running Modin with Ray installed from source, I sometimes run into `ImportError` and `ModuleNotFoundError`; this happens when I am running a modified version of Modin, and it forces me to modify Ray's source so that it does not try to use the Modin that is bundled with Ray.
I will work on a solution for this.
### Source code / logs
`import modin.pandas as pd`
```
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/function_manager.py", line 165, in fetch_and_register_remote_function
function = pickle.loads(serialized_function)
ModuleNotFoundError: No module named 'modin.data_management.utils'
```
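The underlying problem is import precedence on `sys.path`: the Modin copy bundled with Ray is placed ahead of a locally installed (here, modified) Modin, so `import modin` resolves to the bundled code while the pickled remote functions reference modules that only exist in the local version. A minimal sketch of that shadowing, with hypothetical paths used purely for illustration:
```py
import sys

# Hypothetical location, for illustration only.
bundled_modin = "/opt/ray/python/ray/modin"   # copy shipped inside the Ray package

# What Ray was doing -- the bundled copy wins every `import modin`,
# shadowing a user-installed or locally modified Modin:
sys.path.insert(0, bundled_modin)

# What the fix below does instead -- the bundled copy becomes a fallback,
# so a Modin found earlier on sys.path keeps taking precedence:
sys.path.remove(bundled_modin)
sys.path.append(bundled_modin)

# `import modin` scans sys.path in order; the first matching entry decides
# which Modin version is imported.
```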
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.insert(0, modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.6.0\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). 
This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n",
"path": "python/ray/__init__.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.append(modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.6.0\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). 
This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n",
"path": "python/ray/__init__.py"
}
] | diff --git a/python/ray/__init__.py b/python/ray/__init__.py
index ed024a107aa50..776c6d0367294 100644
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -47,7 +47,7 @@
raise
modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
-sys.path.insert(0, modin_path)
+sys.path.append(modin_path)
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
|
great-expectations__great_expectations-1713 | Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
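For illustration, a small sketch of the two styles being compared (function names are made up for the example); on Python 2, plain integer `/` truncates, so code has to opt in to true division one way or the other:
```py
from __future__ import division  # makes `/` true division on Python 2 as well


def ratio(x, y):
    # Preferred: with the future import, no coercion trick is needed.
    return x / y


def ratio_coerced(x, y):
    # The pattern the issue wants to replace: multiply by a float literal
    # to force non-truncating division on Python 2.
    return 1. * x / y


assert ratio(3, 4) == 0.75
assert ratio_coerced(3, 4) == 0.75
```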
| [
{
"content": "import logging\nimport uuid\nfrom copy import deepcopy\n\nfrom marshmallow import (\n INCLUDE,\n Schema,\n ValidationError,\n fields,\n post_dump,\n post_load,\n validates_schema,\n)\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.types import DictDot\nfrom great_expectations.types.configurations import ClassConfigSchema\n\nlogger = logging.getLogger(__name__)\n\nyaml = YAML()\n\nCURRENT_CONFIG_VERSION = 2\nMINIMUM_SUPPORTED_CONFIG_VERSION = 2\nDEFAULT_USAGE_STATISTICS_URL = (\n \"https://stats.greatexpectations.io/great_expectations/v1/usage_statistics\"\n)\n\n\nclass DataContextConfig(DictDot):\n def __init__(\n self,\n config_version,\n datasources,\n expectations_store_name,\n validations_store_name,\n evaluation_parameter_store_name,\n plugins_directory,\n validation_operators,\n stores,\n data_docs_sites,\n notebooks=None,\n config_variables_file_path=None,\n anonymous_usage_statistics=None,\n commented_map=None,\n ):\n if commented_map is None:\n commented_map = CommentedMap()\n self._commented_map = commented_map\n self._config_version = config_version\n if datasources is None:\n datasources = {}\n self.datasources = datasources\n self.expectations_store_name = expectations_store_name\n self.validations_store_name = validations_store_name\n self.evaluation_parameter_store_name = evaluation_parameter_store_name\n self.plugins_directory = plugins_directory\n if not isinstance(validation_operators, dict):\n raise ValueError(\n \"validation_operators must be configured with a dictionary\"\n )\n self.validation_operators = validation_operators\n self.stores = stores\n self.notebooks = notebooks\n self.data_docs_sites = data_docs_sites\n self.config_variables_file_path = config_variables_file_path\n if anonymous_usage_statistics is None:\n anonymous_usage_statistics = AnonymizedUsageStatisticsConfig()\n elif isinstance(anonymous_usage_statistics, dict):\n anonymous_usage_statistics = AnonymizedUsageStatisticsConfig(\n **anonymous_usage_statistics\n )\n self.anonymous_usage_statistics = anonymous_usage_statistics\n\n @property\n def commented_map(self):\n return self._commented_map\n\n @property\n def config_version(self):\n return self._config_version\n\n @classmethod\n def from_commented_map(cls, commented_map):\n try:\n config = dataContextConfigSchema.load(commented_map)\n return cls(commented_map=commented_map, **config)\n except ValidationError:\n logger.error(\n \"Encountered errors during loading data context config. 
See ValidationError for more details.\"\n )\n raise\n\n def to_yaml(self, outfile):\n commented_map = deepcopy(self.commented_map)\n commented_map.update(dataContextConfigSchema.dump(self))\n yaml.dump(commented_map, outfile)\n\n\nclass DatasourceConfig(DictDot):\n def __init__(\n self,\n class_name,\n module_name=None,\n data_asset_type=None,\n batch_kwargs_generators=None,\n credentials=None,\n reader_method=None,\n limit=None,\n **kwargs\n ):\n # NOTE - JPC - 20200316: Currently, we are mostly inconsistent with respect to this type...\n self._class_name = class_name\n self._module_name = module_name\n self.data_asset_type = data_asset_type\n if batch_kwargs_generators is not None:\n self.batch_kwargs_generators = batch_kwargs_generators\n if credentials is not None:\n self.credentials = credentials\n if reader_method is not None:\n self.reader_method = reader_method\n if limit is not None:\n self.limit = limit\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def class_name(self):\n return self._class_name\n\n @property\n def module_name(self):\n return self._module_name\n\n\nclass AnonymizedUsageStatisticsConfig(DictDot):\n def __init__(self, enabled=True, data_context_id=None, usage_statistics_url=None):\n self._enabled = enabled\n if data_context_id is None:\n data_context_id = str(uuid.uuid4())\n self._explicit_id = False\n else:\n self._explicit_id = True\n\n self._data_context_id = data_context_id\n if usage_statistics_url is None:\n usage_statistics_url = DEFAULT_USAGE_STATISTICS_URL\n self._explicit_url = False\n else:\n self._explicit_url = True\n self._usage_statistics_url = usage_statistics_url\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n if not isinstance(enabled, bool):\n raise ValueError(\"usage statistics enabled property must be boolean\")\n self._enabled = enabled\n\n @property\n def data_context_id(self):\n return self._data_context_id\n\n @data_context_id.setter\n def data_context_id(self, data_context_id):\n try:\n uuid.UUID(data_context_id)\n except ValueError:\n raise ge_exceptions.InvalidConfigError(\n \"data_context_id must be a valid uuid\"\n )\n self._data_context_id = data_context_id\n self._explicit_id = True\n\n @property\n def explicit_id(self):\n return self._explicit_id\n\n @property\n def usage_statistics_url(self):\n return self._usage_statistics_url\n\n @usage_statistics_url.setter\n def usage_statistics_url(self, usage_statistics_url):\n self._usage_statistics_url = usage_statistics_url\n self._explicit_url = True\n\n\nclass AnonymizedUsageStatisticsConfigSchema(Schema):\n data_context_id = fields.UUID()\n enabled = fields.Boolean(default=True)\n usage_statistics_url = fields.URL(allow_none=True)\n _explicit_url = fields.Boolean(required=False)\n\n # noinspection PyUnusedLocal\n @post_load()\n def make_usage_statistics_config(self, data, **kwargs):\n if \"data_context_id\" in data:\n data[\"data_context_id\"] = str(data[\"data_context_id\"])\n return AnonymizedUsageStatisticsConfig(**data)\n\n # noinspection PyUnusedLocal\n @post_dump()\n def filter_implicit(self, data, **kwargs):\n if not data.get(\"_explicit_url\") and \"usage_statistics_url\" in data:\n del data[\"usage_statistics_url\"]\n if \"_explicit_url\" in data:\n del data[\"_explicit_url\"]\n return data\n\n\nclass DatasourceConfigSchema(Schema):\n class Meta:\n unknown = INCLUDE\n\n class_name = fields.String(required=True)\n module_name = fields.String(missing=\"great_expectations.datasource\")\n 
data_asset_type = fields.Nested(ClassConfigSchema)\n # TODO: Update to generator-specific\n # batch_kwargs_generators = fields.Mapping(keys=fields.Str(), values=fields.Nested(fields.GeneratorSchema))\n batch_kwargs_generators = fields.Dict(\n keys=fields.Str(), values=fields.Dict(), allow_none=True\n )\n credentials = fields.Raw(allow_none=True)\n\n @validates_schema\n def validate_schema(self, data, **kwargs):\n if \"generators\" in data:\n raise ge_exceptions.InvalidConfigError(\n \"Your current configuration uses the 'generators' key in a datasource, but in version 0.10 of \"\n \"GE, that key is renamed to 'batch_kwargs_generators'. Please update your config to continue.\"\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_datasource_config(self, data, **kwargs):\n return DatasourceConfig(**data)\n\n\nclass NotebookTemplateConfig(DictDot):\n def __init__(self, file_name, template_kwargs=None):\n self.file_name = file_name\n if template_kwargs:\n self.template_kwargs = template_kwargs\n else:\n self.template_kwargs = {}\n\n\nclass NotebookTemplateConfigSchema(Schema):\n file_name = fields.String()\n template_kwargs = fields.Dict(\n keys=fields.Str(), values=fields.Str(), allow_none=True\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebook_template_config(self, data, **kwargs):\n return NotebookTemplateConfig(**data)\n\n\nclass NotebookConfig(DictDot):\n def __init__(\n self,\n class_name,\n module_name,\n custom_templates_module,\n header_markdown=None,\n footer_markdown=None,\n table_expectations_header_markdown=None,\n column_expectations_header_markdown=None,\n table_expectations_not_found_markdown=None,\n column_expectations_not_found_markdown=None,\n authoring_intro_markdown=None,\n column_expectations_markdown=None,\n header_code=None,\n footer_code=None,\n column_expectation_code=None,\n table_expectation_code=None,\n ):\n self.class_name = class_name\n self.module_name = module_name\n self.custom_templates_module = custom_templates_module\n\n self.header_markdown = header_markdown\n self.footer_markdown = footer_markdown\n self.table_expectations_header_markdown = table_expectations_header_markdown\n self.column_expectations_header_markdown = column_expectations_header_markdown\n self.table_expectations_not_found_markdown = (\n table_expectations_not_found_markdown\n )\n self.column_expectations_not_found_markdown = (\n column_expectations_not_found_markdown\n )\n self.authoring_intro_markdown = authoring_intro_markdown\n self.column_expectations_markdown = column_expectations_markdown\n\n self.header_code = header_code\n self.footer_code = footer_code\n self.column_expectation_code = column_expectation_code\n self.table_expectation_code = table_expectation_code\n\n\nclass NotebookConfigSchema(Schema):\n class_name = fields.String(missing=\"SuiteEditNotebookRenderer\")\n module_name = fields.String(\n missing=\"great_expectations.render.renderer.suite_edit_notebook_renderer\"\n )\n custom_templates_module = fields.String()\n\n header_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n footer_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n table_expectations_header_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n column_expectations_header_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n table_expectations_not_found_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n column_expectations_not_found_markdown = 
fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n authoring_intro_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n column_expectations_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n\n header_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n footer_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n column_expectation_code = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n table_expectation_code = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebook_config(self, data, **kwargs):\n return NotebookConfig(**data)\n\n\nclass NotebooksConfig(DictDot):\n def __init__(self, suite_edit):\n self.suite_edit = suite_edit\n\n\nclass NotebooksConfigSchema(Schema):\n # for now only suite_edit, could have other customization options for\n # notebooks in the future\n suite_edit = fields.Nested(NotebookConfigSchema)\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebooks_config(self, data, **kwargs):\n return NotebooksConfig(**data)\n\n\nclass DataContextConfigSchema(Schema):\n config_version = fields.Number(\n validate=lambda x: 0 < x < 100,\n error_messages={\"invalid\": \"config version must \" \"be a number.\"},\n )\n datasources = fields.Dict(\n keys=fields.Str(), values=fields.Nested(DatasourceConfigSchema)\n )\n expectations_store_name = fields.Str()\n validations_store_name = fields.Str()\n evaluation_parameter_store_name = fields.Str()\n plugins_directory = fields.Str(allow_none=True)\n validation_operators = fields.Dict(keys=fields.Str(), values=fields.Dict())\n stores = fields.Dict(keys=fields.Str(), values=fields.Dict())\n notebooks = fields.Nested(NotebooksConfigSchema, allow_none=True)\n data_docs_sites = fields.Dict(\n keys=fields.Str(), values=fields.Dict(), allow_none=True\n )\n config_variables_file_path = fields.Str(allow_none=True)\n anonymous_usage_statistics = fields.Nested(AnonymizedUsageStatisticsConfigSchema)\n\n # noinspection PyMethodMayBeStatic\n # noinspection PyUnusedLocal\n def handle_error(self, exc, data, **kwargs):\n \"\"\"Log and raise our custom exception when (de)serialization fails.\"\"\"\n logger.error(exc.messages)\n raise ge_exceptions.InvalidDataContextConfigError(\n \"Error while processing DataContextConfig.\", exc\n )\n\n @validates_schema\n def validate_schema(self, data, **kwargs):\n if \"config_version\" not in data:\n raise ge_exceptions.InvalidDataContextConfigError(\n \"The key `config_version` is missing; please check your config file.\",\n validation_error=ValidationError(\"no config_version key\"),\n )\n\n if not isinstance(data[\"config_version\"], (int, float)):\n raise ge_exceptions.InvalidDataContextConfigError(\n \"The key `config_version` must be a number. Please check your config file.\",\n validation_error=ValidationError(\"config version not a number\"),\n )\n\n # When migrating from 0.7.x to 0.8.0\n if data[\"config_version\"] == 0 and (\n \"validations_store\" in list(data.keys())\n or \"validations_stores\" in list(data.keys())\n ):\n raise ge_exceptions.UnsupportedConfigVersionError(\n \"You appear to be using a config version from the 0.7.x series. This version is no longer supported.\"\n )\n elif data[\"config_version\"] < MINIMUM_SUPPORTED_CONFIG_VERSION:\n raise ge_exceptions.UnsupportedConfigVersionError(\n \"You appear to have an invalid config version ({}).\\n The version number must be at least {}. 
\"\n \"Please see the migration guide at https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html\".format(\n data[\"config_version\"], MINIMUM_SUPPORTED_CONFIG_VERSION\n ),\n )\n elif data[\"config_version\"] > CURRENT_CONFIG_VERSION:\n raise ge_exceptions.InvalidDataContextConfigError(\n \"You appear to have an invalid config version ({}).\\n The maximum valid version is {}.\".format(\n data[\"config_version\"], CURRENT_CONFIG_VERSION\n ),\n validation_error=ValidationError(\"config version too high\"),\n )\n\n\ndataContextConfigSchema = DataContextConfigSchema()\ndatasourceConfigSchema = DatasourceConfigSchema()\nanonymizedUsageStatisticsSchema = AnonymizedUsageStatisticsConfigSchema()\nnotebookConfigSchema = NotebookConfigSchema()\n",
"path": "great_expectations/data_context/types/base.py"
}
] | [
{
"content": "import logging\nimport uuid\nfrom copy import deepcopy\n\nfrom marshmallow import (\n INCLUDE,\n Schema,\n ValidationError,\n fields,\n post_dump,\n post_load,\n validates_schema,\n)\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.types import DictDot\nfrom great_expectations.types.configurations import ClassConfigSchema\n\nlogger = logging.getLogger(__name__)\n\nyaml = YAML()\n\nCURRENT_CONFIG_VERSION = 2\nMINIMUM_SUPPORTED_CONFIG_VERSION = 2\nDEFAULT_USAGE_STATISTICS_URL = (\n \"https://stats.greatexpectations.io/great_expectations/v1/usage_statistics\"\n)\n\n\nclass DataContextConfig(DictDot):\n def __init__(\n self,\n config_version,\n datasources,\n expectations_store_name,\n validations_store_name,\n evaluation_parameter_store_name,\n plugins_directory,\n validation_operators,\n stores,\n data_docs_sites,\n notebooks=None,\n config_variables_file_path=None,\n anonymous_usage_statistics=None,\n commented_map=None,\n ):\n if commented_map is None:\n commented_map = CommentedMap()\n self._commented_map = commented_map\n self._config_version = config_version\n if datasources is None:\n datasources = {}\n self.datasources = datasources\n self.expectations_store_name = expectations_store_name\n self.validations_store_name = validations_store_name\n self.evaluation_parameter_store_name = evaluation_parameter_store_name\n self.plugins_directory = plugins_directory\n if not isinstance(validation_operators, dict):\n raise ValueError(\n \"validation_operators must be configured with a dictionary\"\n )\n self.validation_operators = validation_operators\n self.stores = stores\n self.notebooks = notebooks\n self.data_docs_sites = data_docs_sites\n self.config_variables_file_path = config_variables_file_path\n if anonymous_usage_statistics is None:\n anonymous_usage_statistics = AnonymizedUsageStatisticsConfig()\n elif isinstance(anonymous_usage_statistics, dict):\n anonymous_usage_statistics = AnonymizedUsageStatisticsConfig(\n **anonymous_usage_statistics\n )\n self.anonymous_usage_statistics = anonymous_usage_statistics\n\n @property\n def commented_map(self):\n return self._commented_map\n\n @property\n def config_version(self):\n return self._config_version\n\n @classmethod\n def from_commented_map(cls, commented_map):\n try:\n config = dataContextConfigSchema.load(commented_map)\n return cls(commented_map=commented_map, **config)\n except ValidationError:\n logger.error(\n \"Encountered errors during loading data context config. 
See ValidationError for more details.\"\n )\n raise\n\n def to_yaml(self, outfile):\n commented_map = deepcopy(self.commented_map)\n commented_map.update(dataContextConfigSchema.dump(self))\n yaml.dump(commented_map, outfile)\n\n\nclass DatasourceConfig(DictDot):\n def __init__(\n self,\n class_name,\n module_name=None,\n data_asset_type=None,\n batch_kwargs_generators=None,\n credentials=None,\n reader_method=None,\n limit=None,\n **kwargs\n ):\n # NOTE - JPC - 20200316: Currently, we are mostly inconsistent with respect to this type...\n self._class_name = class_name\n self._module_name = module_name\n self.data_asset_type = data_asset_type\n if batch_kwargs_generators is not None:\n self.batch_kwargs_generators = batch_kwargs_generators\n if credentials is not None:\n self.credentials = credentials\n if reader_method is not None:\n self.reader_method = reader_method\n if limit is not None:\n self.limit = limit\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def class_name(self):\n return self._class_name\n\n @property\n def module_name(self):\n return self._module_name\n\n\nclass AnonymizedUsageStatisticsConfig(DictDot):\n def __init__(self, enabled=True, data_context_id=None, usage_statistics_url=None):\n self._enabled = enabled\n if data_context_id is None:\n data_context_id = str(uuid.uuid4())\n self._explicit_id = False\n else:\n self._explicit_id = True\n\n self._data_context_id = data_context_id\n if usage_statistics_url is None:\n usage_statistics_url = DEFAULT_USAGE_STATISTICS_URL\n self._explicit_url = False\n else:\n self._explicit_url = True\n self._usage_statistics_url = usage_statistics_url\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n if not isinstance(enabled, bool):\n raise ValueError(\"usage statistics enabled property must be boolean\")\n self._enabled = enabled\n\n @property\n def data_context_id(self):\n return self._data_context_id\n\n @data_context_id.setter\n def data_context_id(self, data_context_id):\n try:\n uuid.UUID(data_context_id)\n except ValueError:\n raise ge_exceptions.InvalidConfigError(\n \"data_context_id must be a valid uuid\"\n )\n self._data_context_id = data_context_id\n self._explicit_id = True\n\n @property\n def explicit_id(self):\n return self._explicit_id\n\n @property\n def usage_statistics_url(self):\n return self._usage_statistics_url\n\n @usage_statistics_url.setter\n def usage_statistics_url(self, usage_statistics_url):\n self._usage_statistics_url = usage_statistics_url\n self._explicit_url = True\n\n\nclass AnonymizedUsageStatisticsConfigSchema(Schema):\n data_context_id = fields.UUID()\n enabled = fields.Boolean(default=True)\n usage_statistics_url = fields.URL(allow_none=True)\n _explicit_url = fields.Boolean(required=False)\n\n # noinspection PyUnusedLocal\n @post_load()\n def make_usage_statistics_config(self, data, **kwargs):\n if \"data_context_id\" in data:\n data[\"data_context_id\"] = str(data[\"data_context_id\"])\n return AnonymizedUsageStatisticsConfig(**data)\n\n # noinspection PyUnusedLocal\n @post_dump()\n def filter_implicit(self, data, **kwargs):\n if not data.get(\"_explicit_url\") and \"usage_statistics_url\" in data:\n del data[\"usage_statistics_url\"]\n if \"_explicit_url\" in data:\n del data[\"_explicit_url\"]\n return data\n\n\nclass DatasourceConfigSchema(Schema):\n class Meta:\n unknown = INCLUDE\n\n class_name = fields.String(required=True)\n module_name = fields.String(missing=\"great_expectations.datasource\")\n 
data_asset_type = fields.Nested(ClassConfigSchema)\n # TODO: Update to generator-specific\n # batch_kwargs_generators = fields.Mapping(keys=fields.Str(), values=fields.Nested(fields.GeneratorSchema))\n batch_kwargs_generators = fields.Dict(\n keys=fields.Str(), values=fields.Dict(), allow_none=True\n )\n credentials = fields.Raw(allow_none=True)\n spark_context = fields.Raw(allow_none=True)\n\n @validates_schema\n def validate_schema(self, data, **kwargs):\n if \"generators\" in data:\n raise ge_exceptions.InvalidConfigError(\n \"Your current configuration uses the 'generators' key in a datasource, but in version 0.10 of \"\n \"GE, that key is renamed to 'batch_kwargs_generators'. Please update your config to continue.\"\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_datasource_config(self, data, **kwargs):\n return DatasourceConfig(**data)\n\n\nclass NotebookTemplateConfig(DictDot):\n def __init__(self, file_name, template_kwargs=None):\n self.file_name = file_name\n if template_kwargs:\n self.template_kwargs = template_kwargs\n else:\n self.template_kwargs = {}\n\n\nclass NotebookTemplateConfigSchema(Schema):\n file_name = fields.String()\n template_kwargs = fields.Dict(\n keys=fields.Str(), values=fields.Str(), allow_none=True\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebook_template_config(self, data, **kwargs):\n return NotebookTemplateConfig(**data)\n\n\nclass NotebookConfig(DictDot):\n def __init__(\n self,\n class_name,\n module_name,\n custom_templates_module,\n header_markdown=None,\n footer_markdown=None,\n table_expectations_header_markdown=None,\n column_expectations_header_markdown=None,\n table_expectations_not_found_markdown=None,\n column_expectations_not_found_markdown=None,\n authoring_intro_markdown=None,\n column_expectations_markdown=None,\n header_code=None,\n footer_code=None,\n column_expectation_code=None,\n table_expectation_code=None,\n ):\n self.class_name = class_name\n self.module_name = module_name\n self.custom_templates_module = custom_templates_module\n\n self.header_markdown = header_markdown\n self.footer_markdown = footer_markdown\n self.table_expectations_header_markdown = table_expectations_header_markdown\n self.column_expectations_header_markdown = column_expectations_header_markdown\n self.table_expectations_not_found_markdown = (\n table_expectations_not_found_markdown\n )\n self.column_expectations_not_found_markdown = (\n column_expectations_not_found_markdown\n )\n self.authoring_intro_markdown = authoring_intro_markdown\n self.column_expectations_markdown = column_expectations_markdown\n\n self.header_code = header_code\n self.footer_code = footer_code\n self.column_expectation_code = column_expectation_code\n self.table_expectation_code = table_expectation_code\n\n\nclass NotebookConfigSchema(Schema):\n class_name = fields.String(missing=\"SuiteEditNotebookRenderer\")\n module_name = fields.String(\n missing=\"great_expectations.render.renderer.suite_edit_notebook_renderer\"\n )\n custom_templates_module = fields.String()\n\n header_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n footer_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n table_expectations_header_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n column_expectations_header_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n table_expectations_not_found_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n 
column_expectations_not_found_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n authoring_intro_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n column_expectations_markdown = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n\n header_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n footer_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)\n column_expectation_code = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n table_expectation_code = fields.Nested(\n NotebookTemplateConfigSchema, allow_none=True\n )\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebook_config(self, data, **kwargs):\n return NotebookConfig(**data)\n\n\nclass NotebooksConfig(DictDot):\n def __init__(self, suite_edit):\n self.suite_edit = suite_edit\n\n\nclass NotebooksConfigSchema(Schema):\n # for now only suite_edit, could have other customization options for\n # notebooks in the future\n suite_edit = fields.Nested(NotebookConfigSchema)\n\n # noinspection PyUnusedLocal\n @post_load\n def make_notebooks_config(self, data, **kwargs):\n return NotebooksConfig(**data)\n\n\nclass DataContextConfigSchema(Schema):\n config_version = fields.Number(\n validate=lambda x: 0 < x < 100,\n error_messages={\"invalid\": \"config version must \" \"be a number.\"},\n )\n datasources = fields.Dict(\n keys=fields.Str(), values=fields.Nested(DatasourceConfigSchema)\n )\n expectations_store_name = fields.Str()\n validations_store_name = fields.Str()\n evaluation_parameter_store_name = fields.Str()\n plugins_directory = fields.Str(allow_none=True)\n validation_operators = fields.Dict(keys=fields.Str(), values=fields.Dict())\n stores = fields.Dict(keys=fields.Str(), values=fields.Dict())\n notebooks = fields.Nested(NotebooksConfigSchema, allow_none=True)\n data_docs_sites = fields.Dict(\n keys=fields.Str(), values=fields.Dict(), allow_none=True\n )\n config_variables_file_path = fields.Str(allow_none=True)\n anonymous_usage_statistics = fields.Nested(AnonymizedUsageStatisticsConfigSchema)\n\n # noinspection PyMethodMayBeStatic\n # noinspection PyUnusedLocal\n def handle_error(self, exc, data, **kwargs):\n \"\"\"Log and raise our custom exception when (de)serialization fails.\"\"\"\n logger.error(exc.messages)\n raise ge_exceptions.InvalidDataContextConfigError(\n \"Error while processing DataContextConfig.\", exc\n )\n\n @validates_schema\n def validate_schema(self, data, **kwargs):\n if \"config_version\" not in data:\n raise ge_exceptions.InvalidDataContextConfigError(\n \"The key `config_version` is missing; please check your config file.\",\n validation_error=ValidationError(\"no config_version key\"),\n )\n\n if not isinstance(data[\"config_version\"], (int, float)):\n raise ge_exceptions.InvalidDataContextConfigError(\n \"The key `config_version` must be a number. Please check your config file.\",\n validation_error=ValidationError(\"config version not a number\"),\n )\n\n # When migrating from 0.7.x to 0.8.0\n if data[\"config_version\"] == 0 and (\n \"validations_store\" in list(data.keys())\n or \"validations_stores\" in list(data.keys())\n ):\n raise ge_exceptions.UnsupportedConfigVersionError(\n \"You appear to be using a config version from the 0.7.x series. 
This version is no longer supported.\"\n )\n elif data[\"config_version\"] < MINIMUM_SUPPORTED_CONFIG_VERSION:\n raise ge_exceptions.UnsupportedConfigVersionError(\n \"You appear to have an invalid config version ({}).\\n The version number must be at least {}. \"\n \"Please see the migration guide at https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html\".format(\n data[\"config_version\"], MINIMUM_SUPPORTED_CONFIG_VERSION\n ),\n )\n elif data[\"config_version\"] > CURRENT_CONFIG_VERSION:\n raise ge_exceptions.InvalidDataContextConfigError(\n \"You appear to have an invalid config version ({}).\\n The maximum valid version is {}.\".format(\n data[\"config_version\"], CURRENT_CONFIG_VERSION\n ),\n validation_error=ValidationError(\"config version too high\"),\n )\n\n\ndataContextConfigSchema = DataContextConfigSchema()\ndatasourceConfigSchema = DatasourceConfigSchema()\nanonymizedUsageStatisticsSchema = AnonymizedUsageStatisticsConfigSchema()\nnotebookConfigSchema = NotebookConfigSchema()\n",
"path": "great_expectations/data_context/types/base.py"
}
] | diff --git a/great_expectations/data_context/types/base.py b/great_expectations/data_context/types/base.py
index 2d1e379ad28e..585f55b7ec86 100644
--- a/great_expectations/data_context/types/base.py
+++ b/great_expectations/data_context/types/base.py
@@ -227,6 +227,7 @@ class Meta:
keys=fields.Str(), values=fields.Dict(), allow_none=True
)
credentials = fields.Raw(allow_none=True)
+ spark_context = fields.Raw(allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
|
cython__cython-3429 | cdiv and cmod incorrect?
I was looking at the definitions of `cdiv` and `cmod` in Shadow.py. These seem to give incorrect results for some operands. For example, in C99:
```
4 / -4 == -1    4 % -4 == 0
4 / -2 == -2    4 % -2 == 0
4 / -1 == -4    4 % -1 == 0
```
But these functions would return:
```
cdiv(4, -4) ==  0    cmod(4, -4) == 4
cdiv(4, -2) == -1    cmod(4, -2) == 2
cdiv(4, -1) == -3    cmod(4, -1) == 1
```
That's based on just running those definitions in the Python interpreter. Perhaps Cython handles them specially, and doesn't in fact give the incorrect results above. Still, why not include correct Python code? The following would work:
```
def cdiv(a, b):
    if a < 0:
        a = -a
        b = -b
    if b < 0:
        return (a + b + 1) // b
    return a // b

def cmod(a, b):
    r = a % b
    if (a*b) < 0 and r: r -= b
    return r
```
Sorry if this is just me misunderstanding some behind-the-scenes magic.
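To make the semantics concrete, here is a hedged sketch contrasting C99's truncate-toward-zero division with Python's flooring `//`, using the corrected `cdiv`/`cmod` definitions proposed above (the asserts are illustrative spot checks, not an exhaustive proof):
```py
def cdiv(a, b):
    # C-style (truncating) integer division, per the proposal above.
    if a < 0:
        a, b = -a, -b
    if b < 0:
        return (a + b + 1) // b
    return a // b


def cmod(a, b):
    # C-style remainder: takes the sign of the dividend.
    r = a % b
    if (a * b) < 0 and r:
        r -= b
    return r


# Python's // floors toward -infinity; C99 truncates toward zero.
assert 4 // -4 == -1 and cdiv(4, -4) == -1    # the two conventions agree here
assert -7 // 2 == -4 and cdiv(-7, 2) == -3    # ...and differ here
assert -7 % 2 == 1 and cmod(-7, 2) == -1      # remainder follows the same split
```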
| [
{
"content": "# cython.* namespace for pure mode.\nfrom __future__ import absolute_import\n\n__version__ = \"3.0a0\"\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\n\n# BEGIN shameless copy from Cython/minivect/minitypes.py\n\nclass _ArrayType(object):\n\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting\n\n def __repr__(self):\n axes = [\":\"] * self.ndim\n if self.is_c_contig:\n axes[-1] = \"::1\"\n elif self.is_f_contig:\n axes[0] = \"::1\"\n\n return \"%s[%s]\" % (self.dtype, \", \".join(axes))\n\n\ndef index_type(base_type, item):\n \"\"\"\n Support array type creation by slicing, e.g. double[:, :] specifies\n a 2D strided array of doubles. The syntax is the same as for\n Cython memoryviews.\n \"\"\"\n class InvalidTypeSpecification(Exception):\n pass\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise InvalidTypeSpecification(\n \"Only a step of 1 may be provided to indicate C or \"\n \"Fortran contiguity\")\n\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise InvalidTypeSpecification(\n \"Step may only be provided once, and only in the \"\n \"first or last dimension.\")\n\n if s.step == 1:\n step_idx = idx\n\n return _ArrayType(base_type, len(item),\n is_c_contig=step_idx == len(item) - 1,\n is_f_contig=step_idx == 0)\n elif isinstance(item, slice):\n verify_slice(item)\n return _ArrayType(base_type, 1, is_c_contig=bool(item.step))\n else:\n # int[8] etc.\n assert int(item) == item # array size must be a plain integer\n array(base_type, item)\n\n# END shameless copy\n\n\ncompiled = False\n\n_Unspecified = object()\n\n# Function decorators\n\ndef _empty_decorator(x):\n return x\n\ndef locals(**arg_types):\n return _empty_decorator\n\ndef test_assert_path_exists(*paths):\n return _empty_decorator\n\ndef test_fail_if_path_exists(*paths):\n return _empty_decorator\n\nclass _EmptyDecoratorAndManager(object):\n def __call__(self, x):\n return x\n def __enter__(self):\n pass\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\nclass _Optimization(object):\n pass\n\ncclass = ccall = cfunc = _EmptyDecoratorAndManager()\n\nreturns = wraparound = boundscheck = initializedcheck = nonecheck = \\\n embedsignature = cdivision = cdivision_warnings = \\\n always_allows_keywords = profile = linetrace = infer_types = \\\n unraisable_tracebacks = freelist = \\\n lambda _: _EmptyDecoratorAndManager()\n\nexceptval = lambda _=None, check=True: _EmptyDecoratorAndManager()\n\noverflowcheck = lambda _: _EmptyDecoratorAndManager()\noptimization = _Optimization()\n\noverflowcheck.fold = optimization.use_switch = \\\n optimization.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager()\n\nfinal = internal = type_version_tag = no_gc_clear = no_gc = _empty_decorator\n\nbinding = lambda _: _empty_decorator\n\n\n_cython_inline = None\ndef inline(f, *args, **kwds):\n if isinstance(f, basestring):\n global _cython_inline\n if _cython_inline is None:\n from Cython.Build.Inline import cython_inline as _cython_inline\n return _cython_inline(f, *args, **kwds)\n else:\n assert len(args) == len(kwds) == 0\n return 
f\n\n\ndef compile(f):\n from Cython.Build.Inline import RuntimeCompiledFunction\n return RuntimeCompiledFunction(f)\n\n\n# Special functions\n\ndef cdiv(a, b):\n q = a / b\n if q < 0:\n q += 1\n return q\n\ndef cmod(a, b):\n r = a % b\n if (a*b) < 0:\n r -= b\n return r\n\n\n# Emulated language constructs\n\ndef cast(type_, *args, **kwargs):\n kwargs.pop('typecheck', None)\n assert not kwargs\n if callable(type_):\n if not isinstance(type_, type) or not (args and isinstance(args[0], type_)):\n return type_(*args)\n return args[0]\n\ndef sizeof(arg):\n return 1\n\ndef typeof(arg):\n return arg.__class__.__name__\n # return type(arg)\n\ndef address(arg):\n return pointer(type(arg))([arg])\n\ndef declare(type=None, value=_Unspecified, **kwds):\n if type not in (None, object) and hasattr(type, '__call__'):\n if value is not _Unspecified:\n return type(value)\n else:\n return type()\n else:\n return value\n\nclass _nogil(object):\n \"\"\"Support for 'with nogil' statement and @nogil decorator.\n \"\"\"\n def __call__(self, x):\n if callable(x):\n # Used as function decorator => return the function unchanged.\n return x\n # Used as conditional context manager or to create an \"@nogil(True/False)\" decorator => keep going.\n return self\n\n def __enter__(self):\n pass\n def __exit__(self, exc_class, exc, tb):\n return exc_class is None\n\nnogil = _nogil()\ngil = _nogil()\ndel _nogil\n\n\n# Emulated types\n\nclass CythonMetaType(type):\n\n def __getitem__(type, ix):\n return array(type, ix)\n\nCythonTypeObject = CythonMetaType('CythonTypeObject', (object,), {})\n\nclass CythonType(CythonTypeObject):\n\n def _pointer(self, n=1):\n for i in range(n):\n self = pointer(self)\n return self\n\nclass PointerType(CythonType):\n\n def __init__(self, value=None):\n if isinstance(value, (ArrayType, PointerType)):\n self._items = [cast(self._basetype, a) for a in value._items]\n elif isinstance(value, list):\n self._items = [cast(self._basetype, a) for a in value]\n elif value is None or value == 0:\n self._items = []\n else:\n raise ValueError\n\n def __getitem__(self, ix):\n if ix < 0:\n raise IndexError(\"negative indexing not allowed in C\")\n return self._items[ix]\n\n def __setitem__(self, ix, value):\n if ix < 0:\n raise IndexError(\"negative indexing not allowed in C\")\n self._items[ix] = cast(self._basetype, value)\n\n def __eq__(self, value):\n if value is None and not self._items:\n return True\n elif type(self) != type(value):\n return False\n else:\n return not self._items and not value._items\n\n def __repr__(self):\n return \"%s *\" % (self._basetype,)\n\nclass ArrayType(PointerType):\n\n def __init__(self):\n self._items = [None] * self._n\n\n\nclass StructType(CythonType):\n\n def __init__(self, cast_from=_Unspecified, **data):\n if cast_from is not _Unspecified:\n # do cast\n if len(data) > 0:\n raise ValueError('Cannot accept keyword arguments when casting.')\n if type(cast_from) is not type(self):\n raise ValueError('Cannot cast from %s'%cast_from)\n for key, value in cast_from.__dict__.items():\n setattr(self, key, value)\n else:\n for key, value in data.items():\n setattr(self, key, value)\n\n def __setattr__(self, key, value):\n if key in self._members:\n self.__dict__[key] = cast(self._members[key], value)\n else:\n raise AttributeError(\"Struct has no member '%s'\" % key)\n\n\nclass UnionType(CythonType):\n\n def __init__(self, cast_from=_Unspecified, **data):\n if cast_from is not _Unspecified:\n # do type cast\n if len(data) > 0:\n raise ValueError('Cannot accept keyword arguments 
when casting.')\n if isinstance(cast_from, dict):\n datadict = cast_from\n elif type(cast_from) is type(self):\n datadict = cast_from.__dict__\n else:\n raise ValueError('Cannot cast from %s'%cast_from)\n else:\n datadict = data\n if len(datadict) > 1:\n raise AttributeError(\"Union can only store one field at a time.\")\n for key, value in datadict.items():\n setattr(self, key, value)\n\n def __setattr__(self, key, value):\n if key in '__dict__':\n CythonType.__setattr__(self, key, value)\n elif key in self._members:\n self.__dict__ = {key: cast(self._members[key], value)}\n else:\n raise AttributeError(\"Union has no member '%s'\" % key)\n\ndef pointer(basetype):\n class PointerInstance(PointerType):\n _basetype = basetype\n return PointerInstance\n\ndef array(basetype, n):\n class ArrayInstance(ArrayType):\n _basetype = basetype\n _n = n\n return ArrayInstance\n\ndef struct(**members):\n class StructInstance(StructType):\n _members = members\n for key in members:\n setattr(StructInstance, key, None)\n return StructInstance\n\ndef union(**members):\n class UnionInstance(UnionType):\n _members = members\n for key in members:\n setattr(UnionInstance, key, None)\n return UnionInstance\n\nclass typedef(CythonType):\n\n def __init__(self, type, name=None):\n self._basetype = type\n self.name = name\n\n def __call__(self, *arg):\n value = cast(self._basetype, *arg)\n return value\n\n def __repr__(self):\n return self.name or str(self._basetype)\n\n __getitem__ = index_type\n\nclass _FusedType(CythonType):\n pass\n\n\ndef fused_type(*args):\n if not args:\n raise TypeError(\"Expected at least one type as argument\")\n\n # Find the numeric type with biggest rank if all types are numeric\n rank = -1\n for type in args:\n if type not in (py_int, py_long, py_float, py_complex):\n break\n\n if type_ordering.index(type) > rank:\n result_type = type\n else:\n return result_type\n\n # Not a simple numeric type, return a fused type instance. The result\n # isn't really meant to be used, as we can't keep track of the context in\n # pure-mode. 
Casting won't do anything in this case.\n return _FusedType()\n\n\ndef _specialized_from_args(signatures, args, kwargs):\n \"Perhaps this should be implemented in a TreeFragment in Cython code\"\n raise Exception(\"yet to be implemented\")\n\n\npy_int = typedef(int, \"int\")\ntry:\n py_long = typedef(long, \"long\")\nexcept NameError: # Py3\n py_long = typedef(int, \"long\")\npy_float = typedef(float, \"float\")\npy_complex = typedef(complex, \"double complex\")\n\n\n# Predefined types\n\nint_types = ['char', 'short', 'Py_UNICODE', 'int', 'Py_UCS4', 'long', 'longlong', 'Py_ssize_t', 'size_t']\nfloat_types = ['longdouble', 'double', 'float']\ncomplex_types = ['longdoublecomplex', 'doublecomplex', 'floatcomplex', 'complex']\nother_types = ['bint', 'void', 'Py_tss_t']\n\nto_repr = {\n 'longlong': 'long long',\n 'longdouble': 'long double',\n 'longdoublecomplex': 'long double complex',\n 'doublecomplex': 'double complex',\n 'floatcomplex': 'float complex',\n}.get\n\ngs = globals()\n\n# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str\ntry:\n import __builtin__ as builtins\nexcept ImportError: # Py3\n import builtins\n\ngs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode')\ndel builtins\n\nfor name in int_types:\n reprname = to_repr(name, name)\n gs[name] = typedef(py_int, reprname)\n if name not in ('Py_UNICODE', 'Py_UCS4') and not name.endswith('size_t'):\n gs['u'+name] = typedef(py_int, \"unsigned \" + reprname)\n gs['s'+name] = typedef(py_int, \"signed \" + reprname)\n\nfor name in float_types:\n gs[name] = typedef(py_float, to_repr(name, name))\n\nfor name in complex_types:\n gs[name] = typedef(py_complex, to_repr(name, name))\n\nbint = typedef(bool, \"bint\")\nvoid = typedef(None, \"void\")\nPy_tss_t = typedef(None, \"Py_tss_t\")\n\nfor t in int_types + float_types + complex_types + other_types:\n for i in range(1, 4):\n gs[\"%s_%s\" % ('p'*i, t)] = gs[t]._pointer(i)\n\nNULL = gs['p_void'](0)\n\n# looks like 'gs' has some users out there by now...\n#del gs\n\nintegral = floating = numeric = _FusedType()\n\ntype_ordering = [py_int, py_long, py_float, py_complex]\n\nclass CythonDotParallel(object):\n \"\"\"\n The cython.parallel module.\n \"\"\"\n\n __all__ = ['parallel', 'prange', 'threadid']\n\n def parallel(self, num_threads=None):\n return nogil\n\n def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None):\n if stop is None:\n stop = start\n start = 0\n return range(start, stop, step)\n\n def threadid(self):\n return 0\n\n # def threadsavailable(self):\n # return 1\n\nimport sys\nsys.modules['cython.parallel'] = CythonDotParallel()\ndel sys\n",
"path": "Cython/Shadow.py"
}
] | [
{
"content": "# cython.* namespace for pure mode.\nfrom __future__ import absolute_import\n\n__version__ = \"3.0a0\"\n\ntry:\n from __builtin__ import basestring\nexcept ImportError:\n basestring = str\n\n\n# BEGIN shameless copy from Cython/minivect/minitypes.py\n\nclass _ArrayType(object):\n\n is_array = True\n subtypes = ['dtype']\n\n def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,\n inner_contig=False, broadcasting=None):\n self.dtype = dtype\n self.ndim = ndim\n self.is_c_contig = is_c_contig\n self.is_f_contig = is_f_contig\n self.inner_contig = inner_contig or is_c_contig or is_f_contig\n self.broadcasting = broadcasting\n\n def __repr__(self):\n axes = [\":\"] * self.ndim\n if self.is_c_contig:\n axes[-1] = \"::1\"\n elif self.is_f_contig:\n axes[0] = \"::1\"\n\n return \"%s[%s]\" % (self.dtype, \", \".join(axes))\n\n\ndef index_type(base_type, item):\n \"\"\"\n Support array type creation by slicing, e.g. double[:, :] specifies\n a 2D strided array of doubles. The syntax is the same as for\n Cython memoryviews.\n \"\"\"\n class InvalidTypeSpecification(Exception):\n pass\n\n def verify_slice(s):\n if s.start or s.stop or s.step not in (None, 1):\n raise InvalidTypeSpecification(\n \"Only a step of 1 may be provided to indicate C or \"\n \"Fortran contiguity\")\n\n if isinstance(item, tuple):\n step_idx = None\n for idx, s in enumerate(item):\n verify_slice(s)\n if s.step and (step_idx or idx not in (0, len(item) - 1)):\n raise InvalidTypeSpecification(\n \"Step may only be provided once, and only in the \"\n \"first or last dimension.\")\n\n if s.step == 1:\n step_idx = idx\n\n return _ArrayType(base_type, len(item),\n is_c_contig=step_idx == len(item) - 1,\n is_f_contig=step_idx == 0)\n elif isinstance(item, slice):\n verify_slice(item)\n return _ArrayType(base_type, 1, is_c_contig=bool(item.step))\n else:\n # int[8] etc.\n assert int(item) == item # array size must be a plain integer\n array(base_type, item)\n\n# END shameless copy\n\n\ncompiled = False\n\n_Unspecified = object()\n\n# Function decorators\n\ndef _empty_decorator(x):\n return x\n\ndef locals(**arg_types):\n return _empty_decorator\n\ndef test_assert_path_exists(*paths):\n return _empty_decorator\n\ndef test_fail_if_path_exists(*paths):\n return _empty_decorator\n\nclass _EmptyDecoratorAndManager(object):\n def __call__(self, x):\n return x\n def __enter__(self):\n pass\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\nclass _Optimization(object):\n pass\n\ncclass = ccall = cfunc = _EmptyDecoratorAndManager()\n\nreturns = wraparound = boundscheck = initializedcheck = nonecheck = \\\n embedsignature = cdivision = cdivision_warnings = \\\n always_allows_keywords = profile = linetrace = infer_types = \\\n unraisable_tracebacks = freelist = \\\n lambda _: _EmptyDecoratorAndManager()\n\nexceptval = lambda _=None, check=True: _EmptyDecoratorAndManager()\n\noverflowcheck = lambda _: _EmptyDecoratorAndManager()\noptimization = _Optimization()\n\noverflowcheck.fold = optimization.use_switch = \\\n optimization.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager()\n\nfinal = internal = type_version_tag = no_gc_clear = no_gc = _empty_decorator\n\nbinding = lambda _: _empty_decorator\n\n\n_cython_inline = None\ndef inline(f, *args, **kwds):\n if isinstance(f, basestring):\n global _cython_inline\n if _cython_inline is None:\n from Cython.Build.Inline import cython_inline as _cython_inline\n return _cython_inline(f, *args, **kwds)\n else:\n assert len(args) == len(kwds) == 0\n return 
f\n\n\ndef compile(f):\n from Cython.Build.Inline import RuntimeCompiledFunction\n return RuntimeCompiledFunction(f)\n\n\n# Special functions\n\ndef cdiv(a, b):\n if a < 0:\n a = -a\n b = -b\n if b < 0:\n return (a + b + 1) // b\n return a // b\n\ndef cmod(a, b):\n r = a % b\n if (a * b) < 0 and r:\n r -= b\n return r\n\n\n# Emulated language constructs\n\ndef cast(type_, *args, **kwargs):\n kwargs.pop('typecheck', None)\n assert not kwargs\n if callable(type_):\n if not isinstance(type_, type) or not (args and isinstance(args[0], type_)):\n return type_(*args)\n return args[0]\n\ndef sizeof(arg):\n return 1\n\ndef typeof(arg):\n return arg.__class__.__name__\n # return type(arg)\n\ndef address(arg):\n return pointer(type(arg))([arg])\n\ndef declare(type=None, value=_Unspecified, **kwds):\n if type not in (None, object) and hasattr(type, '__call__'):\n if value is not _Unspecified:\n return type(value)\n else:\n return type()\n else:\n return value\n\nclass _nogil(object):\n \"\"\"Support for 'with nogil' statement and @nogil decorator.\n \"\"\"\n def __call__(self, x):\n if callable(x):\n # Used as function decorator => return the function unchanged.\n return x\n # Used as conditional context manager or to create an \"@nogil(True/False)\" decorator => keep going.\n return self\n\n def __enter__(self):\n pass\n def __exit__(self, exc_class, exc, tb):\n return exc_class is None\n\nnogil = _nogil()\ngil = _nogil()\ndel _nogil\n\n\n# Emulated types\n\nclass CythonMetaType(type):\n\n def __getitem__(type, ix):\n return array(type, ix)\n\nCythonTypeObject = CythonMetaType('CythonTypeObject', (object,), {})\n\nclass CythonType(CythonTypeObject):\n\n def _pointer(self, n=1):\n for i in range(n):\n self = pointer(self)\n return self\n\nclass PointerType(CythonType):\n\n def __init__(self, value=None):\n if isinstance(value, (ArrayType, PointerType)):\n self._items = [cast(self._basetype, a) for a in value._items]\n elif isinstance(value, list):\n self._items = [cast(self._basetype, a) for a in value]\n elif value is None or value == 0:\n self._items = []\n else:\n raise ValueError\n\n def __getitem__(self, ix):\n if ix < 0:\n raise IndexError(\"negative indexing not allowed in C\")\n return self._items[ix]\n\n def __setitem__(self, ix, value):\n if ix < 0:\n raise IndexError(\"negative indexing not allowed in C\")\n self._items[ix] = cast(self._basetype, value)\n\n def __eq__(self, value):\n if value is None and not self._items:\n return True\n elif type(self) != type(value):\n return False\n else:\n return not self._items and not value._items\n\n def __repr__(self):\n return \"%s *\" % (self._basetype,)\n\nclass ArrayType(PointerType):\n\n def __init__(self):\n self._items = [None] * self._n\n\n\nclass StructType(CythonType):\n\n def __init__(self, cast_from=_Unspecified, **data):\n if cast_from is not _Unspecified:\n # do cast\n if len(data) > 0:\n raise ValueError('Cannot accept keyword arguments when casting.')\n if type(cast_from) is not type(self):\n raise ValueError('Cannot cast from %s'%cast_from)\n for key, value in cast_from.__dict__.items():\n setattr(self, key, value)\n else:\n for key, value in data.items():\n setattr(self, key, value)\n\n def __setattr__(self, key, value):\n if key in self._members:\n self.__dict__[key] = cast(self._members[key], value)\n else:\n raise AttributeError(\"Struct has no member '%s'\" % key)\n\n\nclass UnionType(CythonType):\n\n def __init__(self, cast_from=_Unspecified, **data):\n if cast_from is not _Unspecified:\n # do type cast\n if len(data) > 0:\n 
raise ValueError('Cannot accept keyword arguments when casting.')\n if isinstance(cast_from, dict):\n datadict = cast_from\n elif type(cast_from) is type(self):\n datadict = cast_from.__dict__\n else:\n raise ValueError('Cannot cast from %s'%cast_from)\n else:\n datadict = data\n if len(datadict) > 1:\n raise AttributeError(\"Union can only store one field at a time.\")\n for key, value in datadict.items():\n setattr(self, key, value)\n\n def __setattr__(self, key, value):\n if key in '__dict__':\n CythonType.__setattr__(self, key, value)\n elif key in self._members:\n self.__dict__ = {key: cast(self._members[key], value)}\n else:\n raise AttributeError(\"Union has no member '%s'\" % key)\n\ndef pointer(basetype):\n class PointerInstance(PointerType):\n _basetype = basetype\n return PointerInstance\n\ndef array(basetype, n):\n class ArrayInstance(ArrayType):\n _basetype = basetype\n _n = n\n return ArrayInstance\n\ndef struct(**members):\n class StructInstance(StructType):\n _members = members\n for key in members:\n setattr(StructInstance, key, None)\n return StructInstance\n\ndef union(**members):\n class UnionInstance(UnionType):\n _members = members\n for key in members:\n setattr(UnionInstance, key, None)\n return UnionInstance\n\nclass typedef(CythonType):\n\n def __init__(self, type, name=None):\n self._basetype = type\n self.name = name\n\n def __call__(self, *arg):\n value = cast(self._basetype, *arg)\n return value\n\n def __repr__(self):\n return self.name or str(self._basetype)\n\n __getitem__ = index_type\n\nclass _FusedType(CythonType):\n pass\n\n\ndef fused_type(*args):\n if not args:\n raise TypeError(\"Expected at least one type as argument\")\n\n # Find the numeric type with biggest rank if all types are numeric\n rank = -1\n for type in args:\n if type not in (py_int, py_long, py_float, py_complex):\n break\n\n if type_ordering.index(type) > rank:\n result_type = type\n else:\n return result_type\n\n # Not a simple numeric type, return a fused type instance. The result\n # isn't really meant to be used, as we can't keep track of the context in\n # pure-mode. 
Casting won't do anything in this case.\n return _FusedType()\n\n\ndef _specialized_from_args(signatures, args, kwargs):\n \"Perhaps this should be implemented in a TreeFragment in Cython code\"\n raise Exception(\"yet to be implemented\")\n\n\npy_int = typedef(int, \"int\")\ntry:\n py_long = typedef(long, \"long\")\nexcept NameError: # Py3\n py_long = typedef(int, \"long\")\npy_float = typedef(float, \"float\")\npy_complex = typedef(complex, \"double complex\")\n\n\n# Predefined types\n\nint_types = ['char', 'short', 'Py_UNICODE', 'int', 'Py_UCS4', 'long', 'longlong', 'Py_ssize_t', 'size_t']\nfloat_types = ['longdouble', 'double', 'float']\ncomplex_types = ['longdoublecomplex', 'doublecomplex', 'floatcomplex', 'complex']\nother_types = ['bint', 'void', 'Py_tss_t']\n\nto_repr = {\n 'longlong': 'long long',\n 'longdouble': 'long double',\n 'longdoublecomplex': 'long double complex',\n 'doublecomplex': 'double complex',\n 'floatcomplex': 'float complex',\n}.get\n\ngs = globals()\n\n# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str\ntry:\n import __builtin__ as builtins\nexcept ImportError: # Py3\n import builtins\n\ngs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode')\ndel builtins\n\nfor name in int_types:\n reprname = to_repr(name, name)\n gs[name] = typedef(py_int, reprname)\n if name not in ('Py_UNICODE', 'Py_UCS4') and not name.endswith('size_t'):\n gs['u'+name] = typedef(py_int, \"unsigned \" + reprname)\n gs['s'+name] = typedef(py_int, \"signed \" + reprname)\n\nfor name in float_types:\n gs[name] = typedef(py_float, to_repr(name, name))\n\nfor name in complex_types:\n gs[name] = typedef(py_complex, to_repr(name, name))\n\nbint = typedef(bool, \"bint\")\nvoid = typedef(None, \"void\")\nPy_tss_t = typedef(None, \"Py_tss_t\")\n\nfor t in int_types + float_types + complex_types + other_types:\n for i in range(1, 4):\n gs[\"%s_%s\" % ('p'*i, t)] = gs[t]._pointer(i)\n\nNULL = gs['p_void'](0)\n\n# looks like 'gs' has some users out there by now...\n#del gs\n\nintegral = floating = numeric = _FusedType()\n\ntype_ordering = [py_int, py_long, py_float, py_complex]\n\nclass CythonDotParallel(object):\n \"\"\"\n The cython.parallel module.\n \"\"\"\n\n __all__ = ['parallel', 'prange', 'threadid']\n\n def parallel(self, num_threads=None):\n return nogil\n\n def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None):\n if stop is None:\n stop = start\n start = 0\n return range(start, stop, step)\n\n def threadid(self):\n return 0\n\n # def threadsavailable(self):\n # return 1\n\nimport sys\nsys.modules['cython.parallel'] = CythonDotParallel()\ndel sys\n",
"path": "Cython/Shadow.py"
}
] | diff --git a/Cython/Shadow.py b/Cython/Shadow.py
index b4c20c5b3dc..a0e014a1692 100644
--- a/Cython/Shadow.py
+++ b/Cython/Shadow.py
@@ -146,14 +146,16 @@ def compile(f):
# Special functions
def cdiv(a, b):
- q = a / b
- if q < 0:
- q += 1
- return q
+ if a < 0:
+ a = -a
+ b = -b
+ if b < 0:
+ return (a + b + 1) // b
+ return a // b
def cmod(a, b):
r = a % b
- if (a*b) < 0:
+ if (a * b) < 0 and r:
r -= b
return r
diff --git a/tests/run/cdivision_CEP_516.pyx b/tests/run/cdivision_CEP_516.pyx
index c8b24a0e1bc..fbd2def3abc 100644
--- a/tests/run/cdivision_CEP_516.pyx
+++ b/tests/run/cdivision_CEP_516.pyx
@@ -27,6 +27,9 @@ True
>>> [test_cdiv_cmod(a, b) for a, b in v]
[(1, 7), (-1, -7), (1, -7), (-1, 7)]
+>>> [test_cdiv_cmod(a, b) for a, b in [(4, -4), (4, -2), (4, -1)]]
+[(-1, 0), (-2, 0), (-4, 0)]
+
>>> all([mod_int_py(a,b) == a % b for a in range(-10, 10) for b in range(-10, 10) if b != 0])
True
>>> all([div_int_py(a,b) == a // b for a in range(-10, 10) for b in range(-10, 10) if b != 0])
|
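For context on the Cython `Shadow.py` diff above: Python's `//` and `%` floor toward negative infinity, while C integer division truncates toward zero and the remainder carries the sign of the dividend. Below is a small standalone check (not part of the PR; the harness around the two functions is mine) that exercises the patched `cdiv`/`cmod` exactly as they appear in the diff and compares them with Python's builtins:

```python
# Standalone check of the C-style division semantics emulated by the patched
# cdiv/cmod from Cython/Shadow.py (function bodies copied verbatim from the diff).

def cdiv(a, b):
    # C integer division truncates toward zero.
    if a < 0:
        a = -a
        b = -b
    if b < 0:
        return (a + b + 1) // b
    return a // b

def cmod(a, b):
    # C remainder takes the sign of the dividend, and is 0 when b divides a.
    r = a % b
    if (a * b) < 0 and r:
        r -= b
    return r

# Python's floor division/modulo differ from C's when the operands have
# opposite signs and b does not divide a evenly.
for a, b in [(7, 2), (-7, 2), (7, -2), (-7, -2), (4, -4), (4, -2), (4, -1)]:
    print(a, b, "C:", (cdiv(a, b), cmod(a, b)), "Python:", (a // b, a % b))
```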
ansible-collections__community.general-4809 | redhat_subscription module broken with RHEL 9
### Summary
When I try to ensure that a system does not have an active subscription, the task fails on a RHEL 9 system.
Example:
```yaml
- name: Ensure system subscription is absent
redhat_subscription:
state: absent
activationkey: "{{ sat_activationkey }}"
org_id: "{{ sat_organization }}"
```
fails for a RHEL 9 host.
### Issue Type
Bug Report
### Component Name
redhat_subscription
### Ansible Version
```console (paste below)
$ ansible --version
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
Collection Version
----------------- -------
community.general 5.0.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
RHEL 9
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: Ensure system subscription is absent
redhat_subscription:
state: absent
activationkey: "{{ sat_activationkey }}"
org_id: "{{ sat_organization }}"
```
for a RHEL 9 host, the task fails:
```
fatal: [servera]: FAILED! => {"changed": false, "cmd": "/sbin/subscription-manager unsubscribe --all", "msg": "", "rc": 1, "stderr": "", "stderr_lines": [], "stdout": "Usage: subscription-manager MODULE-NAME [MODULE-OPTIONS] [--help]\n\r\nPrimary Modules: \n\n attach Attach a specified subscription to the registered system, when system does not use Simple Content Access mode\n list List subscription and product information for this system\n refresh Pull the latest subscription data from the server\n register Register this system to the Customer Portal or another subscription management service\n release Configure which operating system release to use\n remove Remove all or specific subscriptions from this system\n status Show status information for this system's subscriptions and products\n unregister Unregister this system from the Customer Portal or another subscription management service\n\nOther Modules: \n\n addons Deprecated, see 'syspurpose'\n auto-attach Set if subscriptions are attached on a schedule (default of daily)\n clean Remove all local system and subscription data without affecting the server\n config List, set, or remove the configuration parameters in use by this system\n environments Display the environments available for a user\n facts View or update the detected system information\n identity Display the identity certificate for this system or request a new one\n import Import certificates which were provided outside of the tool\n orgs Display the organizations against which a user can register a system\n plugins View and configure with 'subscription-manager plugins'\n redeem Attempt to redeem a subscription for a preconfigured system\n repo-override Manage custom content repository settings\n repos List the repositories which this system is entitled to use\n role Deprecated, see 'syspurpose'\n service-level Deprecated, see 'syspurpose'\n syspurpose Convenient module for managing all system purpose settings\n usage Deprecated, see 'syspurpose'\n version Print version information\n\n", "stdout_lines": ["Usage: subscription-manager MODULE-NAME [MODULE-OPTIONS] [--help]", "", "Primary Modules: ", "", " attach Attach a specified subscription to the registered system, when system does not use Simple Content Access mode", " list List subscription and product information for this system", " refresh Pull the latest subscription data from the server", " register Register this system to the Customer Portal or another subscription management service", " release Configure which operating system release to use", " remove Remove all or specific subscriptions from this system", " status Show status information for this system's subscriptions and products", " unregister Unregister this system from the Customer Portal or another subscription management service", "", "Other Modules: ", "", " addons Deprecated, see 'syspurpose'", " auto-attach Set if subscriptions are attached on a schedule (default of daily)", " clean Remove all local system and subscription data without affecting the server", " config List, set, or remove the configuration parameters in use by this system", " environments Display the environments available for a user", " facts View or update the detected system information", " identity Display the identity certificate for this system or request a new one", " import Import certificates which were provided outside of the tool", " orgs Display the organizations against which a user can register a system", " plugins View and configure with 'subscription-manager plugins'", 
" redeem Attempt to redeem a subscription for a preconfigured system", " repo-override Manage custom content repository settings", " repos List the repositories which this system is entitled to use", " role Deprecated, see 'syspurpose'", " service-level Deprecated, see 'syspurpose'", " syspurpose Convenient module for managing all system purpose settings", " usage Deprecated, see 'syspurpose'", " version Print version information", ""]}
```
### Expected Results
I expected this to unsubscribe the host.
### Actual Results
```console (paste below)
"stdout_lines": [
"Usage: subscription-manager MODULE-NAME [MODULE-OPTIONS] [--help]",
"",
"Primary Modules: ",
"",
" attach Attach a specified subscription to the registered system, when system does not use Simple Content Access mode",
" list List subscription and product information for this system",
" refresh Pull the latest subscription data from the server",
" register Register this system to the Customer Portal or another subscription management service",
" release Configure which operating system release to use",
" remove Remove all or specific subscriptions from this system",
" status Show status information for this system's subscriptions and products",
" unregister Unregister this system from the Customer Portal or another subscription management service",
"",
"Other Modules: ",
"",
" addons Deprecated, see 'syspurpose'",
" auto-attach Set if subscriptions are attached on a schedule (default of daily)",
" clean Remove all local system and subscription data without affecting the server",
" config List, set, or remove the configuration parameters in use by this system",
" environments Display the environments available for a user",
" facts View or update the detected system information",
" identity Display the identity certificate for this system or request a new one",
" import Import certificates which were provided outside of the tool",
" orgs Display the organizations against which a user can register a system",
" plugins View and configure with 'subscription-manager plugins'",
" redeem Attempt to redeem a subscription for a preconfigured system",
" repo-override Manage custom content repository settings",
" repos List the repositories which this system is entitled to use",
" role Deprecated, see 'syspurpose'",
" service-level Deprecated, see 'syspurpose'",
" syspurpose Convenient module for managing all system purpose settings",
" usage Deprecated, see 'syspurpose'",
" version Print version information",
""
]
}
```
This appears to happen because `subscription-manager unsubscribe` is not a valid command on RHEL 9: there is no `unsubscribe` sub-command there (RHEL 8 still has one); the help output above only lists `remove` for dropping subscriptions.
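Not the actual fix (the module's final change is not shown here), but a minimal sketch of the kind of adjustment that would avoid the failure: build the argument list around `remove`, which the RHEL 9 help output above still lists, instead of the dropped `unsubscribe` alias. The function name and signature below are illustrative, not the module's real API.

```python
# Hypothetical sketch only: assemble the subscription-manager argv using
# `remove` (available on both RHEL 8 and RHEL 9) rather than `unsubscribe`.

def build_removal_args(subman_cmd, serials=None):
    """Return the argv for dropping subscriptions.

    serials=None means "drop everything" (--all); an explicit empty list
    means there is nothing to remove, mirroring the module's current logic.
    """
    if serials is not None and not serials:
        return None  # nothing to do
    args = [subman_cmd, 'remove']
    if serials:
        args.extend('--serial=%s' % s for s in serials)
    else:
        args.append('--all')
    return args


if __name__ == '__main__':
    print(build_removal_args('/sbin/subscription-manager'))
    # ['/sbin/subscription-manager', 'remove', '--all']
    print(build_removal_args('/sbin/subscription-manager', serials=['1234567890']))
    # ['/sbin/subscription-manager', 'remove', '--serial=1234567890']
```

Run locally this only prints the argv that would be handed to `module.run_command`; it does not touch subscription-manager itself.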
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
| [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# James Laska ([email protected])\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: redhat_subscription\nshort_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command\ndescription:\n - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command\nauthor: \"Barnaby Court (@barnabycourt)\"\nnotes:\n - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.\n - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),\n I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and\n I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)\n config file and default to None.\nrequirements:\n - subscription-manager\noptions:\n state:\n description:\n - whether to register and subscribe (C(present)), or unregister (C(absent)) a system\n choices: [ \"present\", \"absent\" ]\n default: \"present\"\n type: str\n username:\n description:\n - access.redhat.com or Sat6 username\n type: str\n password:\n description:\n - access.redhat.com or Sat6 password\n type: str\n server_hostname:\n description:\n - Specify an alternative Red Hat Subscription Management or Sat6 server\n type: str\n server_insecure:\n description:\n - Enable or disable https server certificate verification when connecting to C(server_hostname)\n type: str\n server_prefix:\n description:\n - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server.\n type: str\n version_added: 3.3.0\n server_port:\n description:\n - Specify the port when registering to the Red Hat Subscription Management or Sat6 server.\n type: str\n version_added: 3.3.0\n rhsm_baseurl:\n description:\n - Specify CDN baseurl\n type: str\n rhsm_repo_ca_cert:\n description:\n - Specify an alternative location for a CA certificate for CDN\n type: str\n server_proxy_hostname:\n description:\n - Specify an HTTP proxy hostname.\n type: str\n server_proxy_port:\n description:\n - Specify an HTTP proxy port.\n type: str\n server_proxy_user:\n description:\n - Specify a user for HTTP proxy with basic authentication\n type: str\n server_proxy_password:\n description:\n - Specify a password for HTTP proxy with basic authentication\n type: str\n auto_attach:\n description:\n - Upon successful registration, auto-consume available subscriptions\n - Added in favor of deprecated autosubscribe in 2.5.\n type: bool\n aliases: [autosubscribe]\n activationkey:\n description:\n - supply an activation key for use with registration\n type: str\n org_id:\n description:\n - Organization ID to use in conjunction with activationkey\n type: str\n environment:\n description:\n - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello\n type: str\n pool:\n description:\n - |\n Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if\n possible, as it is much faster. Mutually exclusive with I(pool_ids).\n default: '^$'\n type: str\n pool_ids:\n description:\n - |\n Specify subscription pool IDs to consume. 
Prefer over I(pool) when possible as it is much faster.\n A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),\n or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.\n C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple\n entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).\n default: []\n type: list\n elements: raw\n consumer_type:\n description:\n - The type of unit to register, defaults to system\n type: str\n consumer_name:\n description:\n - Name of the system to register, defaults to the hostname\n type: str\n consumer_id:\n description:\n - |\n References an existing consumer ID to resume using a previous registration\n for this system. If the system's identity certificate is lost or corrupted,\n this option allows it to resume using its previous identity and subscriptions.\n The default is to not specify a consumer ID so a new ID is created.\n type: str\n force_register:\n description:\n - Register the system even if it is already registered\n type: bool\n default: no\n release:\n description:\n - Set a release version\n type: str\n syspurpose:\n description:\n - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)\n and synchronize these attributes with RHSM server. Syspurpose attributes help attach\n the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file\n already contains some attributes, then new attributes overwrite existing attributes.\n When some attribute is not listed in the new list of attributes, the existing\n attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.\n type: dict\n default: {}\n suboptions:\n usage:\n description: Syspurpose attribute usage\n type: str\n role:\n description: Syspurpose attribute role\n type: str\n service_level_agreement:\n description: Syspurpose attribute service_level_agreement\n type: str\n addons:\n description: Syspurpose attribute addons\n type: list\n elements: str\n sync:\n description:\n - When this option is true, then syspurpose attributes are synchronized with\n RHSM server immediately. 
When this option is false, then syspurpose attributes\n will be synchronized with RHSM server by rhsmcertd daemon.\n type: bool\n default: no\n'''\n\nEXAMPLES = '''\n- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n auto_attach: true\n\n- name: Same as above but subscribe to a specific pool by ID.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids: 0123456789abcdef0123456789abcdef\n\n- name: Register and subscribe to multiple pools.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids:\n - 0123456789abcdef0123456789abcdef\n - 1123456789abcdef0123456789abcdef\n\n- name: Same as above but consume multiple entitlements.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids:\n - 0123456789abcdef0123456789abcdef: 2\n - 1123456789abcdef0123456789abcdef: 4\n\n- name: Register and pull existing system data.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\n- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization\n community.general.redhat_subscription:\n state: present\n activationkey: 1-222333444\n org_id: 222333444\n pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'\n\n- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)\n community.general.redhat_subscription:\n state: present\n activationkey: 1-222333444\n org_id: 222333444\n pool: '^Red Hat Enterprise Server$'\n\n- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n environment: Library\n auto_attach: true\n\n- name: Register as user (joe_user) with password (somepass) and a specific release\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n release: 7.4\n\n- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n auto_attach: true\n syspurpose:\n usage: \"Production\"\n role: \"Red Hat Enterprise Server\"\n service_level_agreement: \"Premium\"\n addons:\n - addon1\n - addon2\n sync: true\n'''\n\nRETURN = '''\nsubscribed_pool_ids:\n description: List of pool IDs to which system is now subscribed\n returned: success\n type: complex\n sample: {\n \"8a85f9815ab905d3015ab928c7005de4\": \"1\"\n }\n'''\n\nfrom os.path import isfile\nfrom os import unlink\nimport re\nimport shutil\nimport tempfile\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six.moves import configparser\n\n\nSUBMAN_CMD = None\n\n\nclass RegistrationBase(object):\n\n REDHAT_REPO = \"/etc/yum.repos.d/redhat.repo\"\n\n def __init__(self, module, username=None, password=None):\n self.module = module\n self.username = username\n self.password = password\n\n def configure(self):\n raise NotImplementedError(\"Must be implemented by a 
sub-class\")\n\n def enable(self):\n # Remove any existing redhat.repo\n if isfile(self.REDHAT_REPO):\n unlink(self.REDHAT_REPO)\n\n def register(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def unregister(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def unsubscribe(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def update_plugin_conf(self, plugin, enabled=True):\n plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin\n\n if isfile(plugin_conf):\n tmpfd, tmpfile = tempfile.mkstemp()\n shutil.copy2(plugin_conf, tmpfile)\n cfg = configparser.ConfigParser()\n cfg.read([tmpfile])\n\n if enabled:\n cfg.set('main', 'enabled', '1')\n else:\n cfg.set('main', 'enabled', '0')\n\n fd = open(tmpfile, 'w+')\n cfg.write(fd)\n fd.close()\n self.module.atomic_move(tmpfile, plugin_conf)\n\n def subscribe(self, **kwargs):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n\nclass Rhsm(RegistrationBase):\n def __init__(self, module, username=None, password=None):\n RegistrationBase.__init__(self, module, username, password)\n self.module = module\n\n def enable(self):\n '''\n Enable the system to receive updates from subscription-manager.\n This involves updating affected yum plugins and removing any\n conflicting yum repositories.\n '''\n RegistrationBase.enable(self)\n self.update_plugin_conf('rhnplugin', False)\n self.update_plugin_conf('subscription-manager', True)\n\n def configure(self, **kwargs):\n '''\n Configure the system as directed for registration with RHSM\n Raises:\n * Exception - if error occurs while running command\n '''\n\n args = [SUBMAN_CMD, 'config']\n\n # Pass supplied **kwargs as parameters to subscription-manager. Ignore\n # non-configuration parameters and replace '_' with '.'. 
For example,\n # 'server_hostname' becomes '--server.hostname'.\n options = []\n for k, v in sorted(kwargs.items()):\n if re.search(r'^(server|rhsm)_', k) and v is not None:\n options.append('--%s=%s' % (k.replace('_', '.', 1), v))\n\n # When there is nothing to configure, then it is not necessary\n # to run config command, because it only returns current\n # content of current configuration file\n if len(options) == 0:\n return\n\n args.extend(options)\n\n self.module.run_command(args, check_rc=True)\n\n @property\n def is_registered(self):\n '''\n Determine whether the current system\n Returns:\n * Boolean - whether the current system is currently registered to\n RHSM.\n '''\n\n args = [SUBMAN_CMD, 'identity']\n rc, stdout, stderr = self.module.run_command(args, check_rc=False)\n if rc == 0:\n return True\n else:\n return False\n\n def register(self, username, password, auto_attach, activationkey, org_id,\n consumer_type, consumer_name, consumer_id, force_register, environment,\n rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,\n server_proxy_port, server_proxy_user, server_proxy_password, release):\n '''\n Register the current system to the provided RHSM or Sat6 server\n Raises:\n * Exception - if error occurs while running command\n '''\n args = [SUBMAN_CMD, 'register']\n\n # Generate command arguments\n if force_register:\n args.extend(['--force'])\n\n if rhsm_baseurl:\n args.extend(['--baseurl', rhsm_baseurl])\n\n if server_insecure:\n args.extend(['--insecure'])\n\n if server_hostname:\n args.extend(['--serverurl', server_hostname])\n\n if org_id:\n args.extend(['--org', org_id])\n\n if server_proxy_hostname and server_proxy_port:\n args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])\n\n if server_proxy_user:\n args.extend(['--proxyuser', server_proxy_user])\n\n if server_proxy_password:\n args.extend(['--proxypassword', server_proxy_password])\n\n if activationkey:\n args.extend(['--activationkey', activationkey])\n else:\n if auto_attach:\n args.append('--auto-attach')\n if username:\n args.extend(['--username', username])\n if password:\n args.extend(['--password', password])\n if consumer_type:\n args.extend(['--type', consumer_type])\n if consumer_name:\n args.extend(['--name', consumer_name])\n if consumer_id:\n args.extend(['--consumerid', consumer_id])\n if environment:\n args.extend(['--environment', environment])\n\n if release:\n args.extend(['--release', release])\n\n rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)\n\n def unsubscribe(self, serials=None):\n '''\n Unsubscribe a system from subscribed channels\n Args:\n serials(list or None): list of serials to unsubscribe. 
If\n serials is none or an empty list, then\n all subscribed channels will be removed.\n Raises:\n * Exception - if error occurs while running command\n '''\n items = []\n if serials is not None and serials:\n items = [\"--serial=%s\" % s for s in serials]\n if serials is None:\n items = [\"--all\"]\n\n if items:\n args = [SUBMAN_CMD, 'unsubscribe'] + items\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n return serials\n\n def unregister(self):\n '''\n Unregister a currently registered system\n Raises:\n * Exception - if error occurs while running command\n '''\n args = [SUBMAN_CMD, 'unregister']\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n self.update_plugin_conf('rhnplugin', False)\n self.update_plugin_conf('subscription-manager', False)\n\n def subscribe(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression. It matches regexp against available pool ids first.\n If any pool ids match, subscribe to those pools and return.\n\n If no pool ids match, then match regexp against available pool product\n names. Note this can still easily match many many pools. Then subscribe\n to those pools.\n\n Since a pool id is a more specific match, we only fallback to matching\n against names if we didn't match pool ids.\n\n Raises:\n * Exception - if error occurs while running command\n '''\n # See https://github.com/ansible/ansible/issues/19466\n\n # subscribe to pools whose pool id matches regexp (and only the pool id)\n subscribed_pool_ids = self.subscribe_pool(regexp)\n\n # If we found any matches, we are done\n # Don't attempt to match pools by product name\n if subscribed_pool_ids:\n return subscribed_pool_ids\n\n # We didn't match any pool ids.\n # Now try subscribing to pools based on product name match\n # Note: This can match lots of product names.\n subscribed_by_product_pool_ids = self.subscribe_product(regexp)\n if subscribed_by_product_pool_ids:\n return subscribed_by_product_pool_ids\n\n # no matches\n return []\n\n def subscribe_by_pool_ids(self, pool_ids):\n \"\"\"\n Try to subscribe to the list of pool IDs\n \"\"\"\n available_pools = RhsmPools(self.module)\n\n available_pool_ids = [p.get_pool_id() for p in available_pools]\n\n for pool_id, quantity in sorted(pool_ids.items()):\n if pool_id in available_pool_ids:\n args = [SUBMAN_CMD, 'attach', '--pool', pool_id]\n if quantity is not None:\n args.extend(['--quantity', to_native(quantity)])\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n else:\n self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)\n return pool_ids\n\n def subscribe_pool(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression\n Raises:\n * Exception - if error occurs while running command\n '''\n\n # Available pools ready for subscription\n available_pools = RhsmPools(self.module)\n\n subscribed_pool_ids = []\n for pool in available_pools.filter_pools(regexp):\n pool.subscribe()\n subscribed_pool_ids.append(pool.get_pool_id())\n return subscribed_pool_ids\n\n def subscribe_product(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression\n Raises:\n * Exception - if error occurs while running command\n '''\n\n # Available pools ready for subscription\n available_pools = RhsmPools(self.module)\n\n subscribed_pool_ids = []\n for pool in available_pools.filter_products(regexp):\n pool.subscribe()\n 
subscribed_pool_ids.append(pool.get_pool_id())\n return subscribed_pool_ids\n\n def update_subscriptions(self, regexp):\n changed = False\n consumed_pools = RhsmPools(self.module, consumed=True)\n pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]\n pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])\n\n serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]\n serials = self.unsubscribe(serials=serials_to_remove)\n\n subscribed_pool_ids = self.subscribe(regexp)\n\n if subscribed_pool_ids or serials:\n changed = True\n return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,\n 'unsubscribed_serials': serials}\n\n def update_subscriptions_by_pool_ids(self, pool_ids):\n changed = False\n consumed_pools = RhsmPools(self.module, consumed=True)\n\n existing_pools = {}\n for p in consumed_pools:\n existing_pools[p.get_pool_id()] = p.QuantityUsed\n\n serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]\n serials = self.unsubscribe(serials=serials_to_remove)\n\n missing_pools = {}\n for pool_id, quantity in sorted(pool_ids.items()):\n if existing_pools.get(pool_id, 0) != quantity:\n missing_pools[pool_id] = quantity\n\n self.subscribe_by_pool_ids(missing_pools)\n\n if missing_pools or serials:\n changed = True\n return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),\n 'unsubscribed_serials': serials}\n\n def sync_syspurpose(self):\n \"\"\"\n Try to synchronize syspurpose attributes with server\n \"\"\"\n args = [SUBMAN_CMD, 'status']\n rc, stdout, stderr = self.module.run_command(args, check_rc=False)\n\n\nclass RhsmPool(object):\n '''\n Convenience class for housing subscription information\n '''\n\n def __init__(self, module, **kwargs):\n self.module = module\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __str__(self):\n return str(self.__getattribute__('_name'))\n\n def get_pool_id(self):\n return getattr(self, 'PoolId', getattr(self, 'PoolID'))\n\n def subscribe(self):\n args = \"subscription-manager attach --pool %s\" % self.get_pool_id()\n rc, stdout, stderr = self.module.run_command(args, check_rc=True)\n if rc == 0:\n return True\n else:\n return False\n\n\nclass RhsmPools(object):\n \"\"\"\n This class is used for manipulating pools subscriptions with RHSM\n \"\"\"\n\n def __init__(self, module, consumed=False):\n self.module = module\n self.products = self._load_product_list(consumed)\n\n def __iter__(self):\n return self.products.__iter__()\n\n def _load_product_list(self, consumed=False):\n \"\"\"\n Loads list of all available or consumed pools for system in data structure\n\n Args:\n consumed(bool): if True list consumed pools, else list available pools (default False)\n \"\"\"\n args = \"subscription-manager list\"\n if consumed:\n args += \" --consumed\"\n else:\n args += \" --available\"\n lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')\n rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)\n\n products = []\n for line in stdout.split('\\n'):\n # Remove leading+trailing whitespace\n line = line.strip()\n # An empty line implies the end of a output group\n if len(line) == 0:\n continue\n # If a colon ':' is found, parse\n elif ':' in line:\n (key, value) = line.split(':', 1)\n key = key.strip().replace(\" \", \"\") # To unify\n value = value.strip()\n if key in ['ProductName', 'SubscriptionName']:\n # Remember the name 
for later processing\n products.append(RhsmPool(self.module, _name=value, key=value))\n elif products:\n # Associate value with most recently recorded product\n products[-1].__setattr__(key, value)\n # FIXME - log some warning?\n # else:\n # warnings.warn(\"Unhandled subscription key/value: %s/%s\" % (key,value))\n return products\n\n def filter_pools(self, regexp='^$'):\n '''\n Return a list of RhsmPools whose pool id matches the provided regular expression\n '''\n r = re.compile(regexp)\n for product in self.products:\n if r.search(product.get_pool_id()):\n yield product\n\n def filter_products(self, regexp='^$'):\n '''\n Return a list of RhsmPools whose product name matches the provided regular expression\n '''\n r = re.compile(regexp)\n for product in self.products:\n if r.search(product._name):\n yield product\n\n\nclass SysPurpose(object):\n \"\"\"\n This class is used for reading and writing to syspurpose.json file\n \"\"\"\n\n SYSPURPOSE_FILE_PATH = \"/etc/rhsm/syspurpose/syspurpose.json\"\n\n ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']\n\n def __init__(self, path=None):\n \"\"\"\n Initialize class used for reading syspurpose json file\n \"\"\"\n self.path = path or self.SYSPURPOSE_FILE_PATH\n\n def update_syspurpose(self, new_syspurpose):\n \"\"\"\n Try to update current syspurpose with new attributes from new_syspurpose\n \"\"\"\n syspurpose = {}\n syspurpose_changed = False\n for key, value in new_syspurpose.items():\n if key in self.ALLOWED_ATTRIBUTES:\n if value is not None:\n syspurpose[key] = value\n elif key == 'sync':\n pass\n else:\n raise KeyError(\"Attribute: %s not in list of allowed attributes: %s\" %\n (key, self.ALLOWED_ATTRIBUTES))\n current_syspurpose = self._read_syspurpose()\n if current_syspurpose != syspurpose:\n syspurpose_changed = True\n # Update current syspurpose with new values\n current_syspurpose.update(syspurpose)\n # When some key is not listed in new syspurpose, then delete it from current syspurpose\n # and ignore custom attributes created by user (e.g. 
\"foo\": \"bar\")\n for key in list(current_syspurpose):\n if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:\n del current_syspurpose[key]\n self._write_syspurpose(current_syspurpose)\n return syspurpose_changed\n\n def _write_syspurpose(self, new_syspurpose):\n \"\"\"\n This function tries to update current new_syspurpose attributes to\n json file.\n \"\"\"\n with open(self.path, \"w\") as fp:\n fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))\n\n def _read_syspurpose(self):\n \"\"\"\n Read current syspurpuse from json file.\n \"\"\"\n current_syspurpose = {}\n try:\n with open(self.path, \"r\") as fp:\n content = fp.read()\n except IOError:\n pass\n else:\n current_syspurpose = json.loads(content)\n return current_syspurpose\n\n\ndef main():\n\n # Load RHSM configuration from file\n rhsm = Rhsm(None)\n\n # Note: the default values for parameters are:\n # 'type': 'str', 'default': None, 'required': False\n # So there is no need to repeat these values for each parameter.\n module = AnsibleModule(\n argument_spec={\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'username': {},\n 'password': {'no_log': True},\n 'server_hostname': {},\n 'server_insecure': {},\n 'server_prefix': {},\n 'server_port': {},\n 'rhsm_baseurl': {},\n 'rhsm_repo_ca_cert': {},\n 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},\n 'activationkey': {'no_log': True},\n 'org_id': {},\n 'environment': {},\n 'pool': {'default': '^$'},\n 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'},\n 'consumer_type': {},\n 'consumer_name': {},\n 'consumer_id': {},\n 'force_register': {'default': False, 'type': 'bool'},\n 'server_proxy_hostname': {},\n 'server_proxy_port': {},\n 'server_proxy_user': {},\n 'server_proxy_password': {'no_log': True},\n 'release': {},\n 'syspurpose': {\n 'type': 'dict',\n 'options': {\n 'role': {},\n 'usage': {},\n 'service_level_agreement': {},\n 'addons': {'type': 'list', 'elements': 'str'},\n 'sync': {'type': 'bool', 'default': False}\n }\n }\n },\n required_together=[['username', 'password'],\n ['server_proxy_hostname', 'server_proxy_port'],\n ['server_proxy_user', 'server_proxy_password']],\n mutually_exclusive=[['activationkey', 'username'],\n ['activationkey', 'consumer_id'],\n ['activationkey', 'environment'],\n ['activationkey', 'auto_attach'],\n ['pool', 'pool_ids']],\n required_if=[['state', 'present', ['username', 'activationkey'], True]],\n )\n\n rhsm.module = module\n state = module.params['state']\n username = module.params['username']\n password = module.params['password']\n server_hostname = module.params['server_hostname']\n server_insecure = module.params['server_insecure']\n server_prefix = module.params['server_prefix']\n server_port = module.params['server_port']\n rhsm_baseurl = module.params['rhsm_baseurl']\n rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']\n auto_attach = module.params['auto_attach']\n activationkey = module.params['activationkey']\n org_id = module.params['org_id']\n if activationkey and not org_id:\n module.fail_json(msg='org_id is required when using activationkey')\n environment = module.params['environment']\n pool = module.params['pool']\n pool_ids = {}\n for value in module.params['pool_ids']:\n if isinstance(value, dict):\n if len(value) != 1:\n module.fail_json(msg='Unable to parse pool_ids option.')\n pool_id, quantity = list(value.items())[0]\n else:\n pool_id, quantity = value, None\n pool_ids[pool_id] = quantity\n consumer_type = 
module.params[\"consumer_type\"]\n consumer_name = module.params[\"consumer_name\"]\n consumer_id = module.params[\"consumer_id\"]\n force_register = module.params[\"force_register\"]\n server_proxy_hostname = module.params['server_proxy_hostname']\n server_proxy_port = module.params['server_proxy_port']\n server_proxy_user = module.params['server_proxy_user']\n server_proxy_password = module.params['server_proxy_password']\n release = module.params['release']\n syspurpose = module.params['syspurpose']\n\n global SUBMAN_CMD\n SUBMAN_CMD = module.get_bin_path('subscription-manager', True)\n\n syspurpose_changed = False\n if syspurpose is not None:\n try:\n syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)\n except Exception as err:\n module.fail_json(msg=\"Failed to update syspurpose attributes: %s\" % to_native(err))\n\n # Ensure system is registered\n if state == 'present':\n\n # Register system\n if rhsm.is_registered and not force_register:\n if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:\n try:\n rhsm.sync_syspurpose()\n except Exception as e:\n module.fail_json(msg=\"Failed to synchronize syspurpose attributes: %s\" % to_native(e))\n if pool != '^$' or pool_ids:\n try:\n if pool_ids:\n result = rhsm.update_subscriptions_by_pool_ids(pool_ids)\n else:\n result = rhsm.update_subscriptions(pool)\n except Exception as e:\n module.fail_json(msg=\"Failed to update subscriptions for '%s': %s\" % (server_hostname, to_native(e)))\n else:\n module.exit_json(**result)\n else:\n if syspurpose_changed is True:\n module.exit_json(changed=True, msg=\"Syspurpose attributes changed.\")\n else:\n module.exit_json(changed=False, msg=\"System already registered.\")\n else:\n try:\n rhsm.enable()\n rhsm.configure(**module.params)\n rhsm.register(username, password, auto_attach, activationkey, org_id,\n consumer_type, consumer_name, consumer_id, force_register,\n environment, rhsm_baseurl, server_insecure, server_hostname,\n server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)\n if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:\n rhsm.sync_syspurpose()\n if pool_ids:\n subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)\n elif pool != '^$':\n subscribed_pool_ids = rhsm.subscribe(pool)\n else:\n subscribed_pool_ids = []\n except Exception as e:\n module.fail_json(msg=\"Failed to register with '%s': %s\" % (server_hostname, to_native(e)))\n else:\n module.exit_json(changed=True,\n msg=\"System successfully registered to '%s'.\" % server_hostname,\n subscribed_pool_ids=subscribed_pool_ids)\n\n # Ensure system is *not* registered\n if state == 'absent':\n if not rhsm.is_registered:\n module.exit_json(changed=False, msg=\"System already unregistered.\")\n else:\n try:\n rhsm.unsubscribe()\n rhsm.unregister()\n except Exception as e:\n module.fail_json(msg=\"Failed to unregister: %s\" % to_native(e))\n else:\n module.exit_json(changed=True, msg=\"System successfully unregistered from %s.\" % server_hostname)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/packaging/os/redhat_subscription.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# James Laska ([email protected])\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: redhat_subscription\nshort_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command\ndescription:\n - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command\nauthor: \"Barnaby Court (@barnabycourt)\"\nnotes:\n - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.\n - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),\n I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and\n I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)\n config file and default to None.\nrequirements:\n - subscription-manager\noptions:\n state:\n description:\n - whether to register and subscribe (C(present)), or unregister (C(absent)) a system\n choices: [ \"present\", \"absent\" ]\n default: \"present\"\n type: str\n username:\n description:\n - access.redhat.com or Sat6 username\n type: str\n password:\n description:\n - access.redhat.com or Sat6 password\n type: str\n server_hostname:\n description:\n - Specify an alternative Red Hat Subscription Management or Sat6 server\n type: str\n server_insecure:\n description:\n - Enable or disable https server certificate verification when connecting to C(server_hostname)\n type: str\n server_prefix:\n description:\n - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server.\n type: str\n version_added: 3.3.0\n server_port:\n description:\n - Specify the port when registering to the Red Hat Subscription Management or Sat6 server.\n type: str\n version_added: 3.3.0\n rhsm_baseurl:\n description:\n - Specify CDN baseurl\n type: str\n rhsm_repo_ca_cert:\n description:\n - Specify an alternative location for a CA certificate for CDN\n type: str\n server_proxy_hostname:\n description:\n - Specify an HTTP proxy hostname.\n type: str\n server_proxy_port:\n description:\n - Specify an HTTP proxy port.\n type: str\n server_proxy_user:\n description:\n - Specify a user for HTTP proxy with basic authentication\n type: str\n server_proxy_password:\n description:\n - Specify a password for HTTP proxy with basic authentication\n type: str\n auto_attach:\n description:\n - Upon successful registration, auto-consume available subscriptions\n - Added in favor of deprecated autosubscribe in 2.5.\n type: bool\n aliases: [autosubscribe]\n activationkey:\n description:\n - supply an activation key for use with registration\n type: str\n org_id:\n description:\n - Organization ID to use in conjunction with activationkey\n type: str\n environment:\n description:\n - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello\n type: str\n pool:\n description:\n - |\n Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if\n possible, as it is much faster. Mutually exclusive with I(pool_ids).\n default: '^$'\n type: str\n pool_ids:\n description:\n - |\n Specify subscription pool IDs to consume. 
Prefer over I(pool) when possible as it is much faster.\n A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),\n or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.\n C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple\n entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).\n default: []\n type: list\n elements: raw\n consumer_type:\n description:\n - The type of unit to register, defaults to system\n type: str\n consumer_name:\n description:\n - Name of the system to register, defaults to the hostname\n type: str\n consumer_id:\n description:\n - |\n References an existing consumer ID to resume using a previous registration\n for this system. If the system's identity certificate is lost or corrupted,\n this option allows it to resume using its previous identity and subscriptions.\n The default is to not specify a consumer ID so a new ID is created.\n type: str\n force_register:\n description:\n - Register the system even if it is already registered\n type: bool\n default: no\n release:\n description:\n - Set a release version\n type: str\n syspurpose:\n description:\n - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)\n and synchronize these attributes with RHSM server. Syspurpose attributes help attach\n the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file\n already contains some attributes, then new attributes overwrite existing attributes.\n When some attribute is not listed in the new list of attributes, the existing\n attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.\n type: dict\n default: {}\n suboptions:\n usage:\n description: Syspurpose attribute usage\n type: str\n role:\n description: Syspurpose attribute role\n type: str\n service_level_agreement:\n description: Syspurpose attribute service_level_agreement\n type: str\n addons:\n description: Syspurpose attribute addons\n type: list\n elements: str\n sync:\n description:\n - When this option is true, then syspurpose attributes are synchronized with\n RHSM server immediately. 
When this option is false, then syspurpose attributes\n will be synchronized with RHSM server by rhsmcertd daemon.\n type: bool\n default: no\n'''\n\nEXAMPLES = '''\n- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n auto_attach: true\n\n- name: Same as above but subscribe to a specific pool by ID.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids: 0123456789abcdef0123456789abcdef\n\n- name: Register and subscribe to multiple pools.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids:\n - 0123456789abcdef0123456789abcdef\n - 1123456789abcdef0123456789abcdef\n\n- name: Same as above but consume multiple entitlements.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n pool_ids:\n - 0123456789abcdef0123456789abcdef: 2\n - 1123456789abcdef0123456789abcdef: 4\n\n- name: Register and pull existing system data.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\n- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization\n community.general.redhat_subscription:\n state: present\n activationkey: 1-222333444\n org_id: 222333444\n pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'\n\n- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)\n community.general.redhat_subscription:\n state: present\n activationkey: 1-222333444\n org_id: 222333444\n pool: '^Red Hat Enterprise Server$'\n\n- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n environment: Library\n auto_attach: true\n\n- name: Register as user (joe_user) with password (somepass) and a specific release\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n release: 7.4\n\n- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server\n community.general.redhat_subscription:\n state: present\n username: joe_user\n password: somepass\n auto_attach: true\n syspurpose:\n usage: \"Production\"\n role: \"Red Hat Enterprise Server\"\n service_level_agreement: \"Premium\"\n addons:\n - addon1\n - addon2\n sync: true\n'''\n\nRETURN = '''\nsubscribed_pool_ids:\n description: List of pool IDs to which system is now subscribed\n returned: success\n type: complex\n sample: {\n \"8a85f9815ab905d3015ab928c7005de4\": \"1\"\n }\n'''\n\nfrom os.path import isfile\nfrom os import unlink\nimport re\nimport shutil\nimport tempfile\nimport json\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six.moves import configparser\n\n\nSUBMAN_CMD = None\n\n\nclass RegistrationBase(object):\n\n REDHAT_REPO = \"/etc/yum.repos.d/redhat.repo\"\n\n def __init__(self, module, username=None, password=None):\n self.module = module\n self.username = username\n self.password = password\n\n def configure(self):\n raise NotImplementedError(\"Must be implemented by a 
sub-class\")\n\n def enable(self):\n # Remove any existing redhat.repo\n if isfile(self.REDHAT_REPO):\n unlink(self.REDHAT_REPO)\n\n def register(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def unregister(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def unsubscribe(self):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n def update_plugin_conf(self, plugin, enabled=True):\n plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin\n\n if isfile(plugin_conf):\n tmpfd, tmpfile = tempfile.mkstemp()\n shutil.copy2(plugin_conf, tmpfile)\n cfg = configparser.ConfigParser()\n cfg.read([tmpfile])\n\n if enabled:\n cfg.set('main', 'enabled', '1')\n else:\n cfg.set('main', 'enabled', '0')\n\n fd = open(tmpfile, 'w+')\n cfg.write(fd)\n fd.close()\n self.module.atomic_move(tmpfile, plugin_conf)\n\n def subscribe(self, **kwargs):\n raise NotImplementedError(\"Must be implemented by a sub-class\")\n\n\nclass Rhsm(RegistrationBase):\n def __init__(self, module, username=None, password=None):\n RegistrationBase.__init__(self, module, username, password)\n self.module = module\n\n def enable(self):\n '''\n Enable the system to receive updates from subscription-manager.\n This involves updating affected yum plugins and removing any\n conflicting yum repositories.\n '''\n RegistrationBase.enable(self)\n self.update_plugin_conf('rhnplugin', False)\n self.update_plugin_conf('subscription-manager', True)\n\n def configure(self, **kwargs):\n '''\n Configure the system as directed for registration with RHSM\n Raises:\n * Exception - if error occurs while running command\n '''\n\n args = [SUBMAN_CMD, 'config']\n\n # Pass supplied **kwargs as parameters to subscription-manager. Ignore\n # non-configuration parameters and replace '_' with '.'. 
For example,\n # 'server_hostname' becomes '--server.hostname'.\n options = []\n for k, v in sorted(kwargs.items()):\n if re.search(r'^(server|rhsm)_', k) and v is not None:\n options.append('--%s=%s' % (k.replace('_', '.', 1), v))\n\n # When there is nothing to configure, then it is not necessary\n # to run config command, because it only returns current\n # content of current configuration file\n if len(options) == 0:\n return\n\n args.extend(options)\n\n self.module.run_command(args, check_rc=True)\n\n @property\n def is_registered(self):\n '''\n Determine whether the current system\n Returns:\n * Boolean - whether the current system is currently registered to\n RHSM.\n '''\n\n args = [SUBMAN_CMD, 'identity']\n rc, stdout, stderr = self.module.run_command(args, check_rc=False)\n if rc == 0:\n return True\n else:\n return False\n\n def register(self, username, password, auto_attach, activationkey, org_id,\n consumer_type, consumer_name, consumer_id, force_register, environment,\n rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,\n server_proxy_port, server_proxy_user, server_proxy_password, release):\n '''\n Register the current system to the provided RHSM or Sat6 server\n Raises:\n * Exception - if error occurs while running command\n '''\n args = [SUBMAN_CMD, 'register']\n\n # Generate command arguments\n if force_register:\n args.extend(['--force'])\n\n if rhsm_baseurl:\n args.extend(['--baseurl', rhsm_baseurl])\n\n if server_insecure:\n args.extend(['--insecure'])\n\n if server_hostname:\n args.extend(['--serverurl', server_hostname])\n\n if org_id:\n args.extend(['--org', org_id])\n\n if server_proxy_hostname and server_proxy_port:\n args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])\n\n if server_proxy_user:\n args.extend(['--proxyuser', server_proxy_user])\n\n if server_proxy_password:\n args.extend(['--proxypassword', server_proxy_password])\n\n if activationkey:\n args.extend(['--activationkey', activationkey])\n else:\n if auto_attach:\n args.append('--auto-attach')\n if username:\n args.extend(['--username', username])\n if password:\n args.extend(['--password', password])\n if consumer_type:\n args.extend(['--type', consumer_type])\n if consumer_name:\n args.extend(['--name', consumer_name])\n if consumer_id:\n args.extend(['--consumerid', consumer_id])\n if environment:\n args.extend(['--environment', environment])\n\n if release:\n args.extend(['--release', release])\n\n rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)\n\n def unsubscribe(self, serials=None):\n '''\n Unsubscribe a system from subscribed channels\n Args:\n serials(list or None): list of serials to unsubscribe. 
If\n serials is none or an empty list, then\n all subscribed channels will be removed.\n Raises:\n * Exception - if error occurs while running command\n '''\n items = []\n if serials is not None and serials:\n items = [\"--serial=%s\" % s for s in serials]\n if serials is None:\n items = [\"--all\"]\n\n if items:\n args = [SUBMAN_CMD, 'remove'] + items\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n return serials\n\n def unregister(self):\n '''\n Unregister a currently registered system\n Raises:\n * Exception - if error occurs while running command\n '''\n args = [SUBMAN_CMD, 'unregister']\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n self.update_plugin_conf('rhnplugin', False)\n self.update_plugin_conf('subscription-manager', False)\n\n def subscribe(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression. It matches regexp against available pool ids first.\n If any pool ids match, subscribe to those pools and return.\n\n If no pool ids match, then match regexp against available pool product\n names. Note this can still easily match many many pools. Then subscribe\n to those pools.\n\n Since a pool id is a more specific match, we only fallback to matching\n against names if we didn't match pool ids.\n\n Raises:\n * Exception - if error occurs while running command\n '''\n # See https://github.com/ansible/ansible/issues/19466\n\n # subscribe to pools whose pool id matches regexp (and only the pool id)\n subscribed_pool_ids = self.subscribe_pool(regexp)\n\n # If we found any matches, we are done\n # Don't attempt to match pools by product name\n if subscribed_pool_ids:\n return subscribed_pool_ids\n\n # We didn't match any pool ids.\n # Now try subscribing to pools based on product name match\n # Note: This can match lots of product names.\n subscribed_by_product_pool_ids = self.subscribe_product(regexp)\n if subscribed_by_product_pool_ids:\n return subscribed_by_product_pool_ids\n\n # no matches\n return []\n\n def subscribe_by_pool_ids(self, pool_ids):\n \"\"\"\n Try to subscribe to the list of pool IDs\n \"\"\"\n available_pools = RhsmPools(self.module)\n\n available_pool_ids = [p.get_pool_id() for p in available_pools]\n\n for pool_id, quantity in sorted(pool_ids.items()):\n if pool_id in available_pool_ids:\n args = [SUBMAN_CMD, 'attach', '--pool', pool_id]\n if quantity is not None:\n args.extend(['--quantity', to_native(quantity)])\n rc, stderr, stdout = self.module.run_command(args, check_rc=True)\n else:\n self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)\n return pool_ids\n\n def subscribe_pool(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression\n Raises:\n * Exception - if error occurs while running command\n '''\n\n # Available pools ready for subscription\n available_pools = RhsmPools(self.module)\n\n subscribed_pool_ids = []\n for pool in available_pools.filter_pools(regexp):\n pool.subscribe()\n subscribed_pool_ids.append(pool.get_pool_id())\n return subscribed_pool_ids\n\n def subscribe_product(self, regexp):\n '''\n Subscribe current system to available pools matching the specified\n regular expression\n Raises:\n * Exception - if error occurs while running command\n '''\n\n # Available pools ready for subscription\n available_pools = RhsmPools(self.module)\n\n subscribed_pool_ids = []\n for pool in available_pools.filter_products(regexp):\n pool.subscribe()\n 
subscribed_pool_ids.append(pool.get_pool_id())\n return subscribed_pool_ids\n\n def update_subscriptions(self, regexp):\n changed = False\n consumed_pools = RhsmPools(self.module, consumed=True)\n pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]\n pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])\n\n serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]\n serials = self.unsubscribe(serials=serials_to_remove)\n\n subscribed_pool_ids = self.subscribe(regexp)\n\n if subscribed_pool_ids or serials:\n changed = True\n return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,\n 'unsubscribed_serials': serials}\n\n def update_subscriptions_by_pool_ids(self, pool_ids):\n changed = False\n consumed_pools = RhsmPools(self.module, consumed=True)\n\n existing_pools = {}\n for p in consumed_pools:\n existing_pools[p.get_pool_id()] = p.QuantityUsed\n\n serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]\n serials = self.unsubscribe(serials=serials_to_remove)\n\n missing_pools = {}\n for pool_id, quantity in sorted(pool_ids.items()):\n if existing_pools.get(pool_id, 0) != quantity:\n missing_pools[pool_id] = quantity\n\n self.subscribe_by_pool_ids(missing_pools)\n\n if missing_pools or serials:\n changed = True\n return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),\n 'unsubscribed_serials': serials}\n\n def sync_syspurpose(self):\n \"\"\"\n Try to synchronize syspurpose attributes with server\n \"\"\"\n args = [SUBMAN_CMD, 'status']\n rc, stdout, stderr = self.module.run_command(args, check_rc=False)\n\n\nclass RhsmPool(object):\n '''\n Convenience class for housing subscription information\n '''\n\n def __init__(self, module, **kwargs):\n self.module = module\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __str__(self):\n return str(self.__getattribute__('_name'))\n\n def get_pool_id(self):\n return getattr(self, 'PoolId', getattr(self, 'PoolID'))\n\n def subscribe(self):\n args = \"subscription-manager attach --pool %s\" % self.get_pool_id()\n rc, stdout, stderr = self.module.run_command(args, check_rc=True)\n if rc == 0:\n return True\n else:\n return False\n\n\nclass RhsmPools(object):\n \"\"\"\n This class is used for manipulating pools subscriptions with RHSM\n \"\"\"\n\n def __init__(self, module, consumed=False):\n self.module = module\n self.products = self._load_product_list(consumed)\n\n def __iter__(self):\n return self.products.__iter__()\n\n def _load_product_list(self, consumed=False):\n \"\"\"\n Loads list of all available or consumed pools for system in data structure\n\n Args:\n consumed(bool): if True list consumed pools, else list available pools (default False)\n \"\"\"\n args = \"subscription-manager list\"\n if consumed:\n args += \" --consumed\"\n else:\n args += \" --available\"\n lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')\n rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)\n\n products = []\n for line in stdout.split('\\n'):\n # Remove leading+trailing whitespace\n line = line.strip()\n # An empty line implies the end of a output group\n if len(line) == 0:\n continue\n # If a colon ':' is found, parse\n elif ':' in line:\n (key, value) = line.split(':', 1)\n key = key.strip().replace(\" \", \"\") # To unify\n value = value.strip()\n if key in ['ProductName', 'SubscriptionName']:\n # Remember the name 
for later processing\n products.append(RhsmPool(self.module, _name=value, key=value))\n elif products:\n # Associate value with most recently recorded product\n products[-1].__setattr__(key, value)\n # FIXME - log some warning?\n # else:\n # warnings.warn(\"Unhandled subscription key/value: %s/%s\" % (key,value))\n return products\n\n def filter_pools(self, regexp='^$'):\n '''\n Return a list of RhsmPools whose pool id matches the provided regular expression\n '''\n r = re.compile(regexp)\n for product in self.products:\n if r.search(product.get_pool_id()):\n yield product\n\n def filter_products(self, regexp='^$'):\n '''\n Return a list of RhsmPools whose product name matches the provided regular expression\n '''\n r = re.compile(regexp)\n for product in self.products:\n if r.search(product._name):\n yield product\n\n\nclass SysPurpose(object):\n \"\"\"\n This class is used for reading and writing to syspurpose.json file\n \"\"\"\n\n SYSPURPOSE_FILE_PATH = \"/etc/rhsm/syspurpose/syspurpose.json\"\n\n ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']\n\n def __init__(self, path=None):\n \"\"\"\n Initialize class used for reading syspurpose json file\n \"\"\"\n self.path = path or self.SYSPURPOSE_FILE_PATH\n\n def update_syspurpose(self, new_syspurpose):\n \"\"\"\n Try to update current syspurpose with new attributes from new_syspurpose\n \"\"\"\n syspurpose = {}\n syspurpose_changed = False\n for key, value in new_syspurpose.items():\n if key in self.ALLOWED_ATTRIBUTES:\n if value is not None:\n syspurpose[key] = value\n elif key == 'sync':\n pass\n else:\n raise KeyError(\"Attribute: %s not in list of allowed attributes: %s\" %\n (key, self.ALLOWED_ATTRIBUTES))\n current_syspurpose = self._read_syspurpose()\n if current_syspurpose != syspurpose:\n syspurpose_changed = True\n # Update current syspurpose with new values\n current_syspurpose.update(syspurpose)\n # When some key is not listed in new syspurpose, then delete it from current syspurpose\n # and ignore custom attributes created by user (e.g. 
\"foo\": \"bar\")\n for key in list(current_syspurpose):\n if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:\n del current_syspurpose[key]\n self._write_syspurpose(current_syspurpose)\n return syspurpose_changed\n\n def _write_syspurpose(self, new_syspurpose):\n \"\"\"\n This function tries to update current new_syspurpose attributes to\n json file.\n \"\"\"\n with open(self.path, \"w\") as fp:\n fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))\n\n def _read_syspurpose(self):\n \"\"\"\n Read current syspurpuse from json file.\n \"\"\"\n current_syspurpose = {}\n try:\n with open(self.path, \"r\") as fp:\n content = fp.read()\n except IOError:\n pass\n else:\n current_syspurpose = json.loads(content)\n return current_syspurpose\n\n\ndef main():\n\n # Load RHSM configuration from file\n rhsm = Rhsm(None)\n\n # Note: the default values for parameters are:\n # 'type': 'str', 'default': None, 'required': False\n # So there is no need to repeat these values for each parameter.\n module = AnsibleModule(\n argument_spec={\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'username': {},\n 'password': {'no_log': True},\n 'server_hostname': {},\n 'server_insecure': {},\n 'server_prefix': {},\n 'server_port': {},\n 'rhsm_baseurl': {},\n 'rhsm_repo_ca_cert': {},\n 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},\n 'activationkey': {'no_log': True},\n 'org_id': {},\n 'environment': {},\n 'pool': {'default': '^$'},\n 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'},\n 'consumer_type': {},\n 'consumer_name': {},\n 'consumer_id': {},\n 'force_register': {'default': False, 'type': 'bool'},\n 'server_proxy_hostname': {},\n 'server_proxy_port': {},\n 'server_proxy_user': {},\n 'server_proxy_password': {'no_log': True},\n 'release': {},\n 'syspurpose': {\n 'type': 'dict',\n 'options': {\n 'role': {},\n 'usage': {},\n 'service_level_agreement': {},\n 'addons': {'type': 'list', 'elements': 'str'},\n 'sync': {'type': 'bool', 'default': False}\n }\n }\n },\n required_together=[['username', 'password'],\n ['server_proxy_hostname', 'server_proxy_port'],\n ['server_proxy_user', 'server_proxy_password']],\n mutually_exclusive=[['activationkey', 'username'],\n ['activationkey', 'consumer_id'],\n ['activationkey', 'environment'],\n ['activationkey', 'auto_attach'],\n ['pool', 'pool_ids']],\n required_if=[['state', 'present', ['username', 'activationkey'], True]],\n )\n\n rhsm.module = module\n state = module.params['state']\n username = module.params['username']\n password = module.params['password']\n server_hostname = module.params['server_hostname']\n server_insecure = module.params['server_insecure']\n server_prefix = module.params['server_prefix']\n server_port = module.params['server_port']\n rhsm_baseurl = module.params['rhsm_baseurl']\n rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']\n auto_attach = module.params['auto_attach']\n activationkey = module.params['activationkey']\n org_id = module.params['org_id']\n if activationkey and not org_id:\n module.fail_json(msg='org_id is required when using activationkey')\n environment = module.params['environment']\n pool = module.params['pool']\n pool_ids = {}\n for value in module.params['pool_ids']:\n if isinstance(value, dict):\n if len(value) != 1:\n module.fail_json(msg='Unable to parse pool_ids option.')\n pool_id, quantity = list(value.items())[0]\n else:\n pool_id, quantity = value, None\n pool_ids[pool_id] = quantity\n consumer_type = 
module.params[\"consumer_type\"]\n consumer_name = module.params[\"consumer_name\"]\n consumer_id = module.params[\"consumer_id\"]\n force_register = module.params[\"force_register\"]\n server_proxy_hostname = module.params['server_proxy_hostname']\n server_proxy_port = module.params['server_proxy_port']\n server_proxy_user = module.params['server_proxy_user']\n server_proxy_password = module.params['server_proxy_password']\n release = module.params['release']\n syspurpose = module.params['syspurpose']\n\n global SUBMAN_CMD\n SUBMAN_CMD = module.get_bin_path('subscription-manager', True)\n\n syspurpose_changed = False\n if syspurpose is not None:\n try:\n syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)\n except Exception as err:\n module.fail_json(msg=\"Failed to update syspurpose attributes: %s\" % to_native(err))\n\n # Ensure system is registered\n if state == 'present':\n\n # Register system\n if rhsm.is_registered and not force_register:\n if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:\n try:\n rhsm.sync_syspurpose()\n except Exception as e:\n module.fail_json(msg=\"Failed to synchronize syspurpose attributes: %s\" % to_native(e))\n if pool != '^$' or pool_ids:\n try:\n if pool_ids:\n result = rhsm.update_subscriptions_by_pool_ids(pool_ids)\n else:\n result = rhsm.update_subscriptions(pool)\n except Exception as e:\n module.fail_json(msg=\"Failed to update subscriptions for '%s': %s\" % (server_hostname, to_native(e)))\n else:\n module.exit_json(**result)\n else:\n if syspurpose_changed is True:\n module.exit_json(changed=True, msg=\"Syspurpose attributes changed.\")\n else:\n module.exit_json(changed=False, msg=\"System already registered.\")\n else:\n try:\n rhsm.enable()\n rhsm.configure(**module.params)\n rhsm.register(username, password, auto_attach, activationkey, org_id,\n consumer_type, consumer_name, consumer_id, force_register,\n environment, rhsm_baseurl, server_insecure, server_hostname,\n server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)\n if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:\n rhsm.sync_syspurpose()\n if pool_ids:\n subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)\n elif pool != '^$':\n subscribed_pool_ids = rhsm.subscribe(pool)\n else:\n subscribed_pool_ids = []\n except Exception as e:\n module.fail_json(msg=\"Failed to register with '%s': %s\" % (server_hostname, to_native(e)))\n else:\n module.exit_json(changed=True,\n msg=\"System successfully registered to '%s'.\" % server_hostname,\n subscribed_pool_ids=subscribed_pool_ids)\n\n # Ensure system is *not* registered\n if state == 'absent':\n if not rhsm.is_registered:\n module.exit_json(changed=False, msg=\"System already unregistered.\")\n else:\n try:\n rhsm.unsubscribe()\n rhsm.unregister()\n except Exception as e:\n module.fail_json(msg=\"Failed to unregister: %s\" % to_native(e))\n else:\n module.exit_json(changed=True, msg=\"System successfully unregistered from %s.\" % server_hostname)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/packaging/os/redhat_subscription.py"
}
] | diff --git a/changelogs/fragments/4809-redhat_subscription-unsubscribe.yaml b/changelogs/fragments/4809-redhat_subscription-unsubscribe.yaml
new file mode 100644
index 00000000000..39a364d0072
--- /dev/null
+++ b/changelogs/fragments/4809-redhat_subscription-unsubscribe.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741).
diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py
index 7bb540b3f13..7309ba7d66f 100644
--- a/plugins/modules/packaging/os/redhat_subscription.py
+++ b/plugins/modules/packaging/os/redhat_subscription.py
@@ -468,7 +468,7 @@ def unsubscribe(self, serials=None):
items = ["--all"]
if items:
- args = [SUBMAN_CMD, 'unsubscribe'] + items
+ args = [SUBMAN_CMD, 'remove'] + items
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
return serials
diff --git a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py
index 7f430ee72cb..8eb7ead6741 100644
--- a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py
+++ b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Author: Jiri Hnidek ([email protected])
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -118,7 +119,7 @@ def test_without_required_parameters(capfd, patch_redhat_subscription):
(0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
),
(
- ['/testbin/subscription-manager', 'unsubscribe', '--all'],
+ ['/testbin/subscription-manager', 'remove', '--all'],
{'check_rc': True},
(0, '', '')
),
@@ -755,7 +756,7 @@ def test_without_required_parameters(capfd, patch_redhat_subscription):
(
[
'/testbin/subscription-manager',
- 'unsubscribe',
+ 'remove',
'--serial=7807912223970164816',
],
{'check_rc': True},
|
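The switch above from the `unsubscribe` subcommand to `remove` is what the changelog fragment ties to RHEL 9, where the older alias apparently stopped working. A minimal sketch (not the module itself; the binary path is assumed purely for illustration) of how the fixed `Rhsm.unsubscribe()` assembles its command line after this change:

```
# Mirrors the serial/--all handling of Rhsm.unsubscribe() after the fix.
SUBMAN_CMD = '/usr/sbin/subscription-manager'  # assumed path, for illustration only

def build_unsubscribe_args(serials=None):
    items = []
    if serials:            # remove only the listed entitlement serials
        items = ['--serial=%s' % s for s in serials]
    if serials is None:    # no filter given: remove every consumed entitlement
        items = ['--all']
    return [SUBMAN_CMD, 'remove'] + items if items else []

print(build_unsubscribe_args())
# ['/usr/sbin/subscription-manager', 'remove', '--all']
print(build_unsubscribe_args(['7807912223970164816']))
# ['/usr/sbin/subscription-manager', 'remove', '--serial=7807912223970164816']
```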
mne-tools__mne-python-10487 | Argument `theme` of `raw.plot()` or config key does not work on macOS
As spotted following the discussion here https://github.com/mne-tools/mne-qt-browser/issues/83, the argument `theme` does not change the theme on either my macOS or Windows computer.
Sample code:
```
import mne
folder = mne.datasets.sample.data_path() / 'MEG' / 'sample'
raw = mne.io.read_raw(folder / 'sample_audvis_filt-0-40_raw.fif', preload=True)
raw.plot(theme='dark')
```
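The title also mentions the config-key route; a hedged sketch of that path (the `MNE_BROWSER_THEME` key name is an assumption here, not something verified in this report) shows the same problem:
```
# Assumed config-key equivalent of theme='dark'; MNE_BROWSER_THEME is taken to
# be the key the Qt browser consults for its theme.
import mne

mne.set_config('MNE_BROWSER_THEME', 'dark')  # persisted in MNE's config file
folder = mne.datasets.sample.data_path() / 'MEG' / 'sample'
raw = mne.io.read_raw(folder / 'sample_audvis_filt-0-40_raw.fif', preload=True)
raw.plot()  # expected to pick up the dark theme
```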
| [
{
"content": "\"\"\"Functions to plot raw M/EEG data.\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Daniel McCloy <[email protected]>\n#\n# License: Simplified BSD\n\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ..annotations import _annotations_starts_stops\nfrom ..filter import create_filter\nfrom ..io.pick import pick_types, _pick_data_channels, pick_info, pick_channels\nfrom ..utils import verbose, _validate_type, _check_option\nfrom ..time_frequency import psd_welch\nfrom ..defaults import _handle_default\nfrom .topo import _plot_topo, _plot_timeseries, _plot_timeseries_unified\nfrom .utils import (plt_show, _compute_scalings, _handle_decim, _check_cov,\n _shorten_path_from_middle, _handle_precompute,\n _get_channel_plotting_order, _make_event_color_dict)\n\n_RAW_CLIP_DEF = 1.5\n\n\n@verbose\ndef plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,\n bgcolor='w', color=None, bad_color='lightgray',\n event_color='cyan', scalings=None, remove_dc=True, order=None,\n show_options=False, title=None, show=True, block=False,\n highpass=None, lowpass=None, filtorder=4,\n clipping=_RAW_CLIP_DEF, show_first_samp=False,\n proj=True, group_by='type', butterfly=False, decim='auto',\n noise_cov=None, event_id=None, show_scrollbars=True,\n show_scalebars=True, time_format='float',\n precompute=None, use_opengl=None, *, theme=None, verbose=None):\n \"\"\"Plot raw data.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data to plot.\n events : array | None\n Events to show with vertical bars.\n duration : float\n Time window (s) to plot. The lesser of this value and the duration\n of the raw file will be used.\n start : float\n Initial time to show (can be changed dynamically once plotted). If\n show_first_samp is True, then it is taken relative to\n ``raw.first_samp``.\n n_channels : int\n Number of channels to plot at once. Defaults to 20. The lesser of\n ``n_channels`` and ``len(raw.ch_names)`` will be shown.\n Has no effect if ``order`` is 'position', 'selection' or 'butterfly'.\n bgcolor : color object\n Color of the background.\n color : dict | color object | None\n Color for the data traces. If None, defaults to::\n\n dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',\n emg='k', ref_meg='steelblue', misc='k', stim='k',\n resp='k', chpi='k')\n\n bad_color : color object\n Color to make bad channels.\n %(event_color)s\n Defaults to ``'cyan'``.\n %(scalings)s\n remove_dc : bool\n If True remove DC component when plotting data.\n order : array of int | None\n Order in which to plot data. If the array is shorter than the number of\n channels, only the given channels are plotted. If None (default), all\n channels are plotted. If ``group_by`` is ``'position'`` or\n ``'selection'``, the ``order`` parameter is used only for selecting the\n channels to be plotted.\n show_options : bool\n If True, a dialog for options related to projection is shown.\n title : str | None\n The title of the window. If None, and either the filename of the\n raw object or '<unknown>' will be displayed as title.\n show : bool\n Show figure if True.\n block : bool\n Whether to halt program execution until the figure is closed.\n Useful for setting bad channels on the fly by clicking on a line.\n May not work on all systems / platforms.\n (Only Qt) If you run from a script, this needs to\n be ``True`` or a Qt-eventloop needs to be started somewhere\n else in the script (e.g. 
if you want to implement the browser\n inside another Qt-Application).\n highpass : float | None\n Highpass to apply when displaying data.\n lowpass : float | None\n Lowpass to apply when displaying data.\n If highpass > lowpass, a bandstop rather than bandpass filter\n will be applied.\n filtorder : int\n Filtering order. 0 will use FIR filtering with MNE defaults.\n Other values will construct an IIR filter of the given order\n and apply it with :func:`~scipy.signal.filtfilt` (making the effective\n order twice ``filtorder``). Filtering may produce some edge artifacts\n (at the left and right edges) of the signals during display.\n\n .. versionchanged:: 0.18\n Support for ``filtorder=0`` to use FIR filtering.\n clipping : str | float | None\n If None, channels are allowed to exceed their designated bounds in\n the plot. If \"clamp\", then values are clamped to the appropriate\n range for display, creating step-like artifacts. If \"transparent\",\n then excessive values are not shown, creating gaps in the traces.\n If float, clipping occurs for values beyond the ``clipping`` multiple\n of their dedicated range, so ``clipping=1.`` is an alias for\n ``clipping='transparent'``.\n\n .. versionchanged:: 0.21\n Support for float, and default changed from None to 1.5.\n show_first_samp : bool\n If True, show time axis relative to the ``raw.first_samp``.\n proj : bool\n Whether to apply projectors prior to plotting (default is ``True``).\n Individual projectors can be enabled/disabled interactively (see\n Notes). This argument only affects the plot; use ``raw.apply_proj()``\n to modify the data stored in the Raw object.\n %(group_by_browse)s\n butterfly : bool\n Whether to start in butterfly mode. Defaults to False.\n decim : int | 'auto'\n Amount to decimate the data during display for speed purposes.\n You should only decimate if the data are sufficiently low-passed,\n otherwise aliasing can occur. The 'auto' mode (default) uses\n the decimation that results in a sampling rate least three times\n larger than ``min(info['lowpass'], lowpass)`` (e.g., a 40 Hz lowpass\n will result in at least a 120 Hz displayed sample rate).\n noise_cov : instance of Covariance | str | None\n Noise covariance used to whiten the data while plotting.\n Whitened data channels are scaled by ``scalings['whitened']``,\n and their channel names are shown in italic.\n Can be a string to load a covariance from disk.\n See also :meth:`mne.Evoked.plot_white` for additional inspection\n of noise covariance properties when whitening evoked data.\n For data processed with SSS, the effective dependence between\n magnetometers and gradiometers may introduce differences in scaling,\n consider using :meth:`mne.Evoked.plot_white`.\n\n .. versionadded:: 0.16.0\n event_id : dict | None\n Event IDs used to show at event markers (default None shows\n the event numbers).\n\n .. versionadded:: 0.16.0\n %(show_scrollbars)s\n %(show_scalebars)s\n\n .. versionadded:: 0.20.0\n %(time_format)s\n %(precompute)s\n %(use_opengl)s\n %(theme_pg)s\n\n .. versionadded:: 1.0\n %(verbose)s\n\n Returns\n -------\n fig : matplotlib.figure.Figure | ``PyQt5.QtWidgets.QMainWindow``\n Browser instance.\n\n Notes\n -----\n The arrow keys (up/down/left/right) can typically be used to navigate\n between channels and time ranges, but this depends on the backend\n matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The\n left/right arrows will scroll by 25%% of ``duration``, whereas\n shift+left/shift+right will scroll by 100%% of ``duration``. 
The scaling\n can be adjusted with - and + (or =) keys. The viewport dimensions can be\n adjusted with page up/page down and home/end keys. Full screen mode can be\n toggled with the F11 key, and scrollbars can be hidden/shown by pressing\n 'z'. Right-click a channel label to view its location. To mark or un-mark a\n channel as bad, click on a channel label or a channel trace. The changes\n will be reflected immediately in the raw object's ``raw.info['bads']``\n entry.\n\n If projectors are present, a button labelled \"Prj\" in the lower right\n corner of the plot window opens a secondary control window, which allows\n enabling/disabling specific projectors individually. This provides a means\n of interactively observing how each projector would affect the raw data if\n it were applied.\n\n Annotation mode is toggled by pressing 'a', butterfly mode by pressing\n 'b', and whitening mode (when ``noise_cov is not None``) by pressing 'w'.\n By default, the channel means are removed when ``remove_dc`` is set to\n ``True``. This flag can be toggled by pressing 'd'.\n\n .. note:: For the Qt backend to run in IPython with ``block=False``\n you must run the magic command ``%%gui qt5`` first.\n .. note:: To report issues with the qt-backend, please use the\n `issues <https://github.com/mne-tools/mne-qt-browser/issues>`_\n of ``mne-qt-browser``.\n \"\"\"\n from ..io.base import BaseRaw\n from ._figure import _get_browser\n\n info = raw.info.copy()\n sfreq = info['sfreq']\n projs = info['projs']\n # this will be an attr for which projectors are currently \"on\" in the plot\n projs_on = np.full_like(projs, proj, dtype=bool)\n # disable projs in info if user doesn't want to see them right away\n if not proj:\n with info._unlock():\n info['projs'] = list()\n\n # handle defaults / check arg validity\n color = _handle_default('color', color)\n scalings = _compute_scalings(scalings, raw, remove_dc=remove_dc,\n duration=duration)\n if scalings['whitened'] == 'auto':\n scalings['whitened'] = 1.\n _validate_type(raw, BaseRaw, 'raw', 'Raw')\n decim, picks_data = _handle_decim(info, decim, lowpass)\n noise_cov = _check_cov(noise_cov, info)\n units = _handle_default('units', None)\n unit_scalings = _handle_default('scalings', None)\n _check_option('group_by', group_by,\n ('selection', 'position', 'original', 'type'))\n\n # clipping\n _validate_type(clipping, (None, 'numeric', str), 'clipping')\n if isinstance(clipping, str):\n _check_option('clipping', clipping, ('clamp', 'transparent'),\n extra='when a string')\n clipping = 1. 
if clipping == 'transparent' else clipping\n elif clipping is not None:\n clipping = float(clipping)\n\n # be forgiving if user asks for too much time\n duration = min(raw.times[-1], float(duration))\n\n # determine IIR filtering parameters\n if highpass is not None and highpass <= 0:\n raise ValueError(f'highpass must be > 0, got {highpass}')\n if highpass is None and lowpass is None:\n ba = filt_bounds = None\n else:\n filtorder = int(filtorder)\n if filtorder == 0:\n method = 'fir'\n iir_params = None\n else:\n method = 'iir'\n iir_params = dict(order=filtorder, output='sos', ftype='butter')\n ba = create_filter(np.zeros((1, int(round(duration * sfreq)))),\n sfreq, highpass, lowpass, method=method,\n iir_params=iir_params)\n filt_bounds = _annotations_starts_stops(\n raw, ('edge', 'bad_acq_skip'), invert=True)\n\n # compute event times in seconds\n if events is not None:\n event_times = (events[:, 0] - raw.first_samp).astype(float)\n event_times /= sfreq\n event_nums = events[:, 2]\n else:\n event_times = event_nums = None\n\n # determine trace order\n ch_names = np.array(raw.ch_names)\n ch_types = np.array(raw.get_channel_types())\n order = _get_channel_plotting_order(order, ch_types)\n n_channels = min(info['nchan'], n_channels, len(order))\n # adjust order based on channel selection, if needed\n selections = None\n if group_by in ('selection', 'position'):\n selections = _setup_channel_selections(raw, group_by, order)\n order = np.concatenate(list(selections.values()))\n default_selection = list(selections)[0]\n n_channels = len(selections[default_selection])\n\n # handle event colors\n event_color_dict = _make_event_color_dict(event_color, events, event_id)\n\n # handle first_samp\n first_time = raw._first_time if show_first_samp else 0\n start += first_time\n event_id_rev = {v: k for k, v in (event_id or {}).items()}\n\n # generate window title; allow instances without a filename (e.g., ICA)\n if title is None:\n title = '<unknown>'\n fnames = raw._filenames.copy()\n if len(fnames):\n title = fnames.pop(0)\n extra = f' ... 
(+ {len(fnames)} more)' if len(fnames) else ''\n title = f'{title}{extra}'\n if len(title) > 60:\n title = _shorten_path_from_middle(title)\n elif not isinstance(title, str):\n raise TypeError(f'title must be None or a string, got a {type(title)}')\n\n # gather parameters and initialize figure\n _validate_type(use_opengl, (bool, None), 'use_opengl')\n precompute = _handle_precompute(precompute)\n params = dict(inst=raw,\n info=info,\n # channels and channel order\n ch_names=ch_names,\n ch_types=ch_types,\n ch_order=order,\n picks=order[:n_channels],\n n_channels=n_channels,\n picks_data=picks_data,\n group_by=group_by,\n ch_selections=selections,\n # time\n t_start=start,\n duration=duration,\n n_times=raw.n_times,\n first_time=first_time,\n time_format=time_format,\n decim=decim,\n # events\n event_color_dict=event_color_dict,\n event_times=event_times,\n event_nums=event_nums,\n event_id_rev=event_id_rev,\n # preprocessing\n projs=projs,\n projs_on=projs_on,\n apply_proj=proj,\n remove_dc=remove_dc,\n filter_coefs=ba,\n filter_bounds=filt_bounds,\n noise_cov=noise_cov,\n # scalings\n scalings=scalings,\n units=units,\n unit_scalings=unit_scalings,\n # colors\n ch_color_bad=bad_color,\n ch_color_dict=color,\n # display\n butterfly=butterfly,\n clipping=clipping,\n scrollbars_visible=show_scrollbars,\n scalebars_visible=show_scalebars,\n window_title=title,\n bgcolor=bgcolor,\n # Qt-specific\n precompute=precompute,\n use_opengl=use_opengl)\n\n fig = _get_browser(show=show, block=block, **params)\n\n return fig\n\n\n@verbose\ndef plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False,\n n_fft=None, n_overlap=0, reject_by_annotation=True,\n picks=None, ax=None, color='black', xscale='linear',\n area_mode='std', area_alpha=0.33, dB=True, estimate='auto',\n show=True, n_jobs=1, average=False, line_alpha=None,\n spatial_colors=True, sphere=None, window='hamming',\n exclude='bads', verbose=None):\n \"\"\"%(plot_psd_doc)s.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw object.\n fmin : float\n Start frequency to consider.\n fmax : float\n End frequency to consider.\n tmin : float | None\n Start time to consider.\n tmax : float | None\n End time to consider.\n proj : bool\n Apply projection.\n n_fft : int | None\n Number of points to use in Welch FFT calculations.\n Default is None, which uses the minimum of 2048 and the\n number of time points.\n n_overlap : int\n The number of points of overlap between blocks. The default value\n is 0 (no overlap).\n %(reject_by_annotation_raw)s\n %(picks_plot_psd_good_data)s\n ax : instance of Axes | None\n Axes to plot into. If None, axes will be created.\n %(color_plot_psd)s\n %(xscale_plot_psd)s\n %(area_mode_plot_psd)s\n %(area_alpha_plot_psd)s\n %(dB_plot_psd)s\n %(estimate_plot_psd)s\n %(show)s\n %(n_jobs)s\n %(average_plot_psd)s\n %(line_alpha_plot_psd)s\n %(spatial_colors_plot_psd)s\n %(sphere_topomap_auto)s\n %(window_psd)s\n\n .. versionadded:: 0.22.0\n exclude : list of str | 'bads'\n Channels names to exclude from being shown. If 'bads', the bad channels\n are excluded. Pass an empty list to plot all channels (including\n channels marked \"bad\", if any).\n\n .. versionadded:: 0.24.0\n %(verbose)s\n\n Returns\n -------\n fig : instance of Figure\n Figure with frequency spectra of the data channels.\n \"\"\"\n from ._mpl_figure import _psd_figure\n # handle FFT\n if n_fft is None:\n if tmax is None or not np.isfinite(tmax):\n tmax = raw.times[-1]\n tmin = 0. 
if tmin is None else tmin\n n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)\n # generate figure\n fig = _psd_figure(\n inst=raw, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,\n fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,\n average=average, estimate=estimate, area_mode=area_mode,\n line_alpha=line_alpha, area_alpha=area_alpha, color=color,\n spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft,\n n_overlap=n_overlap, reject_by_annotation=reject_by_annotation,\n window=window, exclude=exclude)\n plt_show(show)\n return fig\n\n\n@verbose\ndef plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False,\n n_fft=2048, n_overlap=0, layout=None, color='w',\n fig_facecolor='k', axis_facecolor='k', dB=True,\n show=True, block=False, n_jobs=1, axes=None,\n verbose=None):\n \"\"\"Plot channel-wise frequency spectra as topography.\n\n Parameters\n ----------\n raw : instance of io.Raw\n The raw instance to use.\n tmin : float\n Start time for calculations. Defaults to zero.\n tmax : float | None\n End time for calculations. If None (default), the end of data is used.\n fmin : float\n Start frequency to consider. Defaults to zero.\n fmax : float\n End frequency to consider. Defaults to 100.\n proj : bool\n Apply projection. Defaults to False.\n n_fft : int\n Number of points to use in Welch FFT calculations. Defaults to 2048.\n n_overlap : int\n The number of points of overlap between blocks. Defaults to 0\n (no overlap).\n layout : instance of Layout | None\n Layout instance specifying sensor positions (does not need to be\n specified for Neuromag data). If None (default), the correct layout is\n inferred from the data.\n color : str | tuple\n A matplotlib-compatible color to use for the curves. Defaults to white.\n fig_facecolor : str | tuple\n A matplotlib-compatible color to use for the figure background.\n Defaults to black.\n axis_facecolor : str | tuple\n A matplotlib-compatible color to use for the axis background.\n Defaults to black.\n dB : bool\n If True, transform data to decibels. Defaults to True.\n show : bool\n Show figure if True. Defaults to True.\n block : bool\n Whether to halt program execution until the figure is closed.\n May not work on all systems / platforms. Defaults to False.\n %(n_jobs)s\n axes : instance of matplotlib Axes | None\n Axes to plot into. 
If None, axes will be created.\n %(verbose)s\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n Figure distributing one image per channel across sensor topography.\n \"\"\"\n if layout is None:\n from ..channels.layout import find_layout\n layout = find_layout(raw.info)\n\n psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,\n fmax=fmax, proj=proj, n_fft=n_fft,\n n_overlap=n_overlap, n_jobs=n_jobs)\n if dB:\n psds = 10 * np.log10(psds)\n y_label = 'dB'\n else:\n y_label = 'Power'\n show_func = partial(_plot_timeseries_unified, data=[psds], color=color,\n times=[freqs])\n click_func = partial(_plot_timeseries, data=[psds], color=color,\n times=[freqs])\n picks = _pick_data_channels(raw.info)\n info = pick_info(raw.info, picks)\n\n fig = _plot_topo(info, times=freqs, show_func=show_func,\n click_func=click_func, layout=layout,\n axis_facecolor=axis_facecolor,\n fig_facecolor=fig_facecolor, x_label='Frequency (Hz)',\n unified=True, y_label=y_label, axes=axes)\n\n try:\n plt_show(show, block=block)\n except TypeError: # not all versions have this\n plt_show(show)\n return fig\n\n\ndef _setup_channel_selections(raw, kind, order):\n \"\"\"Get dictionary of channel groupings.\"\"\"\n from ..channels import (read_vectorview_selection, _SELECTIONS,\n _EEG_SELECTIONS, _divide_to_regions)\n from ..utils import _get_stim_channel\n _check_option('group_by', kind, ('position', 'selection'))\n if kind == 'position':\n selections_dict = _divide_to_regions(raw.info)\n keys = _SELECTIONS[1:] # omit 'Vertex'\n else: # kind == 'selection'\n from ..channels.channels import _get_ch_info\n (has_vv_mag, has_vv_grad, *_, has_neuromag_122_grad, has_csd_coils\n ) = _get_ch_info(raw.info)\n if not (has_vv_grad or has_vv_mag or has_neuromag_122_grad):\n raise ValueError(\"order='selection' only works for Neuromag \"\n \"data. Use order='position' instead.\")\n selections_dict = OrderedDict()\n # get stim channel (if any)\n stim_ch = _get_stim_channel(None, raw.info, raise_error=False)\n stim_ch = stim_ch if len(stim_ch) else ['']\n stim_ch = pick_channels(raw.ch_names, stim_ch)\n # loop over regions\n keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS])\n for key in keys:\n channels = read_vectorview_selection(key, info=raw.info)\n picks = pick_channels(raw.ch_names, channels)\n picks = np.intersect1d(picks, order)\n if not len(picks):\n continue # omit empty selections\n selections_dict[key] = np.concatenate([picks, stim_ch])\n # add misc channels\n misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True,\n ecg=True, emg=True, ref_meg=False, misc=True,\n resp=True, chpi=True, exci=True, ias=True, syst=True,\n seeg=False, bio=True, ecog=False, fnirs=False, dbs=False,\n exclude=())\n if len(misc) and np.in1d(misc, order).any():\n selections_dict['Misc'] = misc\n return selections_dict\n",
"path": "mne/viz/raw.py"
}
] | [
{
"content": "\"\"\"Functions to plot raw M/EEG data.\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Daniel McCloy <[email protected]>\n#\n# License: Simplified BSD\n\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom ..annotations import _annotations_starts_stops\nfrom ..filter import create_filter\nfrom ..io.pick import pick_types, _pick_data_channels, pick_info, pick_channels\nfrom ..utils import verbose, _validate_type, _check_option\nfrom ..time_frequency import psd_welch\nfrom ..defaults import _handle_default\nfrom .topo import _plot_topo, _plot_timeseries, _plot_timeseries_unified\nfrom .utils import (plt_show, _compute_scalings, _handle_decim, _check_cov,\n _shorten_path_from_middle, _handle_precompute,\n _get_channel_plotting_order, _make_event_color_dict)\n\n_RAW_CLIP_DEF = 1.5\n\n\n@verbose\ndef plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,\n bgcolor='w', color=None, bad_color='lightgray',\n event_color='cyan', scalings=None, remove_dc=True, order=None,\n show_options=False, title=None, show=True, block=False,\n highpass=None, lowpass=None, filtorder=4,\n clipping=_RAW_CLIP_DEF, show_first_samp=False,\n proj=True, group_by='type', butterfly=False, decim='auto',\n noise_cov=None, event_id=None, show_scrollbars=True,\n show_scalebars=True, time_format='float',\n precompute=None, use_opengl=None, *, theme=None, verbose=None):\n \"\"\"Plot raw data.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data to plot.\n events : array | None\n Events to show with vertical bars.\n duration : float\n Time window (s) to plot. The lesser of this value and the duration\n of the raw file will be used.\n start : float\n Initial time to show (can be changed dynamically once plotted). If\n show_first_samp is True, then it is taken relative to\n ``raw.first_samp``.\n n_channels : int\n Number of channels to plot at once. Defaults to 20. The lesser of\n ``n_channels`` and ``len(raw.ch_names)`` will be shown.\n Has no effect if ``order`` is 'position', 'selection' or 'butterfly'.\n bgcolor : color object\n Color of the background.\n color : dict | color object | None\n Color for the data traces. If None, defaults to::\n\n dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',\n emg='k', ref_meg='steelblue', misc='k', stim='k',\n resp='k', chpi='k')\n\n bad_color : color object\n Color to make bad channels.\n %(event_color)s\n Defaults to ``'cyan'``.\n %(scalings)s\n remove_dc : bool\n If True remove DC component when plotting data.\n order : array of int | None\n Order in which to plot data. If the array is shorter than the number of\n channels, only the given channels are plotted. If None (default), all\n channels are plotted. If ``group_by`` is ``'position'`` or\n ``'selection'``, the ``order`` parameter is used only for selecting the\n channels to be plotted.\n show_options : bool\n If True, a dialog for options related to projection is shown.\n title : str | None\n The title of the window. If None, and either the filename of the\n raw object or '<unknown>' will be displayed as title.\n show : bool\n Show figure if True.\n block : bool\n Whether to halt program execution until the figure is closed.\n Useful for setting bad channels on the fly by clicking on a line.\n May not work on all systems / platforms.\n (Only Qt) If you run from a script, this needs to\n be ``True`` or a Qt-eventloop needs to be started somewhere\n else in the script (e.g. 
if you want to implement the browser\n inside another Qt-Application).\n highpass : float | None\n Highpass to apply when displaying data.\n lowpass : float | None\n Lowpass to apply when displaying data.\n If highpass > lowpass, a bandstop rather than bandpass filter\n will be applied.\n filtorder : int\n Filtering order. 0 will use FIR filtering with MNE defaults.\n Other values will construct an IIR filter of the given order\n and apply it with :func:`~scipy.signal.filtfilt` (making the effective\n order twice ``filtorder``). Filtering may produce some edge artifacts\n (at the left and right edges) of the signals during display.\n\n .. versionchanged:: 0.18\n Support for ``filtorder=0`` to use FIR filtering.\n clipping : str | float | None\n If None, channels are allowed to exceed their designated bounds in\n the plot. If \"clamp\", then values are clamped to the appropriate\n range for display, creating step-like artifacts. If \"transparent\",\n then excessive values are not shown, creating gaps in the traces.\n If float, clipping occurs for values beyond the ``clipping`` multiple\n of their dedicated range, so ``clipping=1.`` is an alias for\n ``clipping='transparent'``.\n\n .. versionchanged:: 0.21\n Support for float, and default changed from None to 1.5.\n show_first_samp : bool\n If True, show time axis relative to the ``raw.first_samp``.\n proj : bool\n Whether to apply projectors prior to plotting (default is ``True``).\n Individual projectors can be enabled/disabled interactively (see\n Notes). This argument only affects the plot; use ``raw.apply_proj()``\n to modify the data stored in the Raw object.\n %(group_by_browse)s\n butterfly : bool\n Whether to start in butterfly mode. Defaults to False.\n decim : int | 'auto'\n Amount to decimate the data during display for speed purposes.\n You should only decimate if the data are sufficiently low-passed,\n otherwise aliasing can occur. The 'auto' mode (default) uses\n the decimation that results in a sampling rate least three times\n larger than ``min(info['lowpass'], lowpass)`` (e.g., a 40 Hz lowpass\n will result in at least a 120 Hz displayed sample rate).\n noise_cov : instance of Covariance | str | None\n Noise covariance used to whiten the data while plotting.\n Whitened data channels are scaled by ``scalings['whitened']``,\n and their channel names are shown in italic.\n Can be a string to load a covariance from disk.\n See also :meth:`mne.Evoked.plot_white` for additional inspection\n of noise covariance properties when whitening evoked data.\n For data processed with SSS, the effective dependence between\n magnetometers and gradiometers may introduce differences in scaling,\n consider using :meth:`mne.Evoked.plot_white`.\n\n .. versionadded:: 0.16.0\n event_id : dict | None\n Event IDs used to show at event markers (default None shows\n the event numbers).\n\n .. versionadded:: 0.16.0\n %(show_scrollbars)s\n %(show_scalebars)s\n\n .. versionadded:: 0.20.0\n %(time_format)s\n %(precompute)s\n %(use_opengl)s\n %(theme_pg)s\n\n .. versionadded:: 1.0\n %(verbose)s\n\n Returns\n -------\n fig : matplotlib.figure.Figure | ``PyQt5.QtWidgets.QMainWindow``\n Browser instance.\n\n Notes\n -----\n The arrow keys (up/down/left/right) can typically be used to navigate\n between channels and time ranges, but this depends on the backend\n matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The\n left/right arrows will scroll by 25%% of ``duration``, whereas\n shift+left/shift+right will scroll by 100%% of ``duration``. 
The scaling\n can be adjusted with - and + (or =) keys. The viewport dimensions can be\n adjusted with page up/page down and home/end keys. Full screen mode can be\n toggled with the F11 key, and scrollbars can be hidden/shown by pressing\n 'z'. Right-click a channel label to view its location. To mark or un-mark a\n channel as bad, click on a channel label or a channel trace. The changes\n will be reflected immediately in the raw object's ``raw.info['bads']``\n entry.\n\n If projectors are present, a button labelled \"Prj\" in the lower right\n corner of the plot window opens a secondary control window, which allows\n enabling/disabling specific projectors individually. This provides a means\n of interactively observing how each projector would affect the raw data if\n it were applied.\n\n Annotation mode is toggled by pressing 'a', butterfly mode by pressing\n 'b', and whitening mode (when ``noise_cov is not None``) by pressing 'w'.\n By default, the channel means are removed when ``remove_dc`` is set to\n ``True``. This flag can be toggled by pressing 'd'.\n\n .. note:: For the Qt backend to run in IPython with ``block=False``\n you must run the magic command ``%%gui qt5`` first.\n .. note:: To report issues with the qt-backend, please use the\n `issues <https://github.com/mne-tools/mne-qt-browser/issues>`_\n of ``mne-qt-browser``.\n \"\"\"\n from ..io.base import BaseRaw\n from ._figure import _get_browser\n\n info = raw.info.copy()\n sfreq = info['sfreq']\n projs = info['projs']\n # this will be an attr for which projectors are currently \"on\" in the plot\n projs_on = np.full_like(projs, proj, dtype=bool)\n # disable projs in info if user doesn't want to see them right away\n if not proj:\n with info._unlock():\n info['projs'] = list()\n\n # handle defaults / check arg validity\n color = _handle_default('color', color)\n scalings = _compute_scalings(scalings, raw, remove_dc=remove_dc,\n duration=duration)\n if scalings['whitened'] == 'auto':\n scalings['whitened'] = 1.\n _validate_type(raw, BaseRaw, 'raw', 'Raw')\n decim, picks_data = _handle_decim(info, decim, lowpass)\n noise_cov = _check_cov(noise_cov, info)\n units = _handle_default('units', None)\n unit_scalings = _handle_default('scalings', None)\n _check_option('group_by', group_by,\n ('selection', 'position', 'original', 'type'))\n\n # clipping\n _validate_type(clipping, (None, 'numeric', str), 'clipping')\n if isinstance(clipping, str):\n _check_option('clipping', clipping, ('clamp', 'transparent'),\n extra='when a string')\n clipping = 1. 
if clipping == 'transparent' else clipping\n elif clipping is not None:\n clipping = float(clipping)\n\n # be forgiving if user asks for too much time\n duration = min(raw.times[-1], float(duration))\n\n # determine IIR filtering parameters\n if highpass is not None and highpass <= 0:\n raise ValueError(f'highpass must be > 0, got {highpass}')\n if highpass is None and lowpass is None:\n ba = filt_bounds = None\n else:\n filtorder = int(filtorder)\n if filtorder == 0:\n method = 'fir'\n iir_params = None\n else:\n method = 'iir'\n iir_params = dict(order=filtorder, output='sos', ftype='butter')\n ba = create_filter(np.zeros((1, int(round(duration * sfreq)))),\n sfreq, highpass, lowpass, method=method,\n iir_params=iir_params)\n filt_bounds = _annotations_starts_stops(\n raw, ('edge', 'bad_acq_skip'), invert=True)\n\n # compute event times in seconds\n if events is not None:\n event_times = (events[:, 0] - raw.first_samp).astype(float)\n event_times /= sfreq\n event_nums = events[:, 2]\n else:\n event_times = event_nums = None\n\n # determine trace order\n ch_names = np.array(raw.ch_names)\n ch_types = np.array(raw.get_channel_types())\n order = _get_channel_plotting_order(order, ch_types)\n n_channels = min(info['nchan'], n_channels, len(order))\n # adjust order based on channel selection, if needed\n selections = None\n if group_by in ('selection', 'position'):\n selections = _setup_channel_selections(raw, group_by, order)\n order = np.concatenate(list(selections.values()))\n default_selection = list(selections)[0]\n n_channels = len(selections[default_selection])\n\n # handle event colors\n event_color_dict = _make_event_color_dict(event_color, events, event_id)\n\n # handle first_samp\n first_time = raw._first_time if show_first_samp else 0\n start += first_time\n event_id_rev = {v: k for k, v in (event_id or {}).items()}\n\n # generate window title; allow instances without a filename (e.g., ICA)\n if title is None:\n title = '<unknown>'\n fnames = raw._filenames.copy()\n if len(fnames):\n title = fnames.pop(0)\n extra = f' ... 
(+ {len(fnames)} more)' if len(fnames) else ''\n title = f'{title}{extra}'\n if len(title) > 60:\n title = _shorten_path_from_middle(title)\n elif not isinstance(title, str):\n raise TypeError(f'title must be None or a string, got a {type(title)}')\n\n # gather parameters and initialize figure\n _validate_type(use_opengl, (bool, None), 'use_opengl')\n precompute = _handle_precompute(precompute)\n params = dict(inst=raw,\n info=info,\n # channels and channel order\n ch_names=ch_names,\n ch_types=ch_types,\n ch_order=order,\n picks=order[:n_channels],\n n_channels=n_channels,\n picks_data=picks_data,\n group_by=group_by,\n ch_selections=selections,\n # time\n t_start=start,\n duration=duration,\n n_times=raw.n_times,\n first_time=first_time,\n time_format=time_format,\n decim=decim,\n # events\n event_color_dict=event_color_dict,\n event_times=event_times,\n event_nums=event_nums,\n event_id_rev=event_id_rev,\n # preprocessing\n projs=projs,\n projs_on=projs_on,\n apply_proj=proj,\n remove_dc=remove_dc,\n filter_coefs=ba,\n filter_bounds=filt_bounds,\n noise_cov=noise_cov,\n # scalings\n scalings=scalings,\n units=units,\n unit_scalings=unit_scalings,\n # colors\n ch_color_bad=bad_color,\n ch_color_dict=color,\n # display\n butterfly=butterfly,\n clipping=clipping,\n scrollbars_visible=show_scrollbars,\n scalebars_visible=show_scalebars,\n window_title=title,\n bgcolor=bgcolor,\n # Qt-specific\n precompute=precompute,\n use_opengl=use_opengl,\n theme=theme)\n\n fig = _get_browser(show=show, block=block, **params)\n\n return fig\n\n\n@verbose\ndef plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False,\n n_fft=None, n_overlap=0, reject_by_annotation=True,\n picks=None, ax=None, color='black', xscale='linear',\n area_mode='std', area_alpha=0.33, dB=True, estimate='auto',\n show=True, n_jobs=1, average=False, line_alpha=None,\n spatial_colors=True, sphere=None, window='hamming',\n exclude='bads', verbose=None):\n \"\"\"%(plot_psd_doc)s.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw object.\n fmin : float\n Start frequency to consider.\n fmax : float\n End frequency to consider.\n tmin : float | None\n Start time to consider.\n tmax : float | None\n End time to consider.\n proj : bool\n Apply projection.\n n_fft : int | None\n Number of points to use in Welch FFT calculations.\n Default is None, which uses the minimum of 2048 and the\n number of time points.\n n_overlap : int\n The number of points of overlap between blocks. The default value\n is 0 (no overlap).\n %(reject_by_annotation_raw)s\n %(picks_plot_psd_good_data)s\n ax : instance of Axes | None\n Axes to plot into. If None, axes will be created.\n %(color_plot_psd)s\n %(xscale_plot_psd)s\n %(area_mode_plot_psd)s\n %(area_alpha_plot_psd)s\n %(dB_plot_psd)s\n %(estimate_plot_psd)s\n %(show)s\n %(n_jobs)s\n %(average_plot_psd)s\n %(line_alpha_plot_psd)s\n %(spatial_colors_plot_psd)s\n %(sphere_topomap_auto)s\n %(window_psd)s\n\n .. versionadded:: 0.22.0\n exclude : list of str | 'bads'\n Channels names to exclude from being shown. If 'bads', the bad channels\n are excluded. Pass an empty list to plot all channels (including\n channels marked \"bad\", if any).\n\n .. versionadded:: 0.24.0\n %(verbose)s\n\n Returns\n -------\n fig : instance of Figure\n Figure with frequency spectra of the data channels.\n \"\"\"\n from ._mpl_figure import _psd_figure\n # handle FFT\n if n_fft is None:\n if tmax is None or not np.isfinite(tmax):\n tmax = raw.times[-1]\n tmin = 0. 
if tmin is None else tmin\n n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)\n # generate figure\n fig = _psd_figure(\n inst=raw, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,\n fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,\n average=average, estimate=estimate, area_mode=area_mode,\n line_alpha=line_alpha, area_alpha=area_alpha, color=color,\n spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft,\n n_overlap=n_overlap, reject_by_annotation=reject_by_annotation,\n window=window, exclude=exclude)\n plt_show(show)\n return fig\n\n\n@verbose\ndef plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False,\n n_fft=2048, n_overlap=0, layout=None, color='w',\n fig_facecolor='k', axis_facecolor='k', dB=True,\n show=True, block=False, n_jobs=1, axes=None,\n verbose=None):\n \"\"\"Plot channel-wise frequency spectra as topography.\n\n Parameters\n ----------\n raw : instance of io.Raw\n The raw instance to use.\n tmin : float\n Start time for calculations. Defaults to zero.\n tmax : float | None\n End time for calculations. If None (default), the end of data is used.\n fmin : float\n Start frequency to consider. Defaults to zero.\n fmax : float\n End frequency to consider. Defaults to 100.\n proj : bool\n Apply projection. Defaults to False.\n n_fft : int\n Number of points to use in Welch FFT calculations. Defaults to 2048.\n n_overlap : int\n The number of points of overlap between blocks. Defaults to 0\n (no overlap).\n layout : instance of Layout | None\n Layout instance specifying sensor positions (does not need to be\n specified for Neuromag data). If None (default), the correct layout is\n inferred from the data.\n color : str | tuple\n A matplotlib-compatible color to use for the curves. Defaults to white.\n fig_facecolor : str | tuple\n A matplotlib-compatible color to use for the figure background.\n Defaults to black.\n axis_facecolor : str | tuple\n A matplotlib-compatible color to use for the axis background.\n Defaults to black.\n dB : bool\n If True, transform data to decibels. Defaults to True.\n show : bool\n Show figure if True. Defaults to True.\n block : bool\n Whether to halt program execution until the figure is closed.\n May not work on all systems / platforms. Defaults to False.\n %(n_jobs)s\n axes : instance of matplotlib Axes | None\n Axes to plot into. 
If None, axes will be created.\n %(verbose)s\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n Figure distributing one image per channel across sensor topography.\n \"\"\"\n if layout is None:\n from ..channels.layout import find_layout\n layout = find_layout(raw.info)\n\n psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,\n fmax=fmax, proj=proj, n_fft=n_fft,\n n_overlap=n_overlap, n_jobs=n_jobs)\n if dB:\n psds = 10 * np.log10(psds)\n y_label = 'dB'\n else:\n y_label = 'Power'\n show_func = partial(_plot_timeseries_unified, data=[psds], color=color,\n times=[freqs])\n click_func = partial(_plot_timeseries, data=[psds], color=color,\n times=[freqs])\n picks = _pick_data_channels(raw.info)\n info = pick_info(raw.info, picks)\n\n fig = _plot_topo(info, times=freqs, show_func=show_func,\n click_func=click_func, layout=layout,\n axis_facecolor=axis_facecolor,\n fig_facecolor=fig_facecolor, x_label='Frequency (Hz)',\n unified=True, y_label=y_label, axes=axes)\n\n try:\n plt_show(show, block=block)\n except TypeError: # not all versions have this\n plt_show(show)\n return fig\n\n\ndef _setup_channel_selections(raw, kind, order):\n \"\"\"Get dictionary of channel groupings.\"\"\"\n from ..channels import (read_vectorview_selection, _SELECTIONS,\n _EEG_SELECTIONS, _divide_to_regions)\n from ..utils import _get_stim_channel\n _check_option('group_by', kind, ('position', 'selection'))\n if kind == 'position':\n selections_dict = _divide_to_regions(raw.info)\n keys = _SELECTIONS[1:] # omit 'Vertex'\n else: # kind == 'selection'\n from ..channels.channels import _get_ch_info\n (has_vv_mag, has_vv_grad, *_, has_neuromag_122_grad, has_csd_coils\n ) = _get_ch_info(raw.info)\n if not (has_vv_grad or has_vv_mag or has_neuromag_122_grad):\n raise ValueError(\"order='selection' only works for Neuromag \"\n \"data. Use order='position' instead.\")\n selections_dict = OrderedDict()\n # get stim channel (if any)\n stim_ch = _get_stim_channel(None, raw.info, raise_error=False)\n stim_ch = stim_ch if len(stim_ch) else ['']\n stim_ch = pick_channels(raw.ch_names, stim_ch)\n # loop over regions\n keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS])\n for key in keys:\n channels = read_vectorview_selection(key, info=raw.info)\n picks = pick_channels(raw.ch_names, channels)\n picks = np.intersect1d(picks, order)\n if not len(picks):\n continue # omit empty selections\n selections_dict[key] = np.concatenate([picks, stim_ch])\n # add misc channels\n misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True,\n ecg=True, emg=True, ref_meg=False, misc=True,\n resp=True, chpi=True, exci=True, ias=True, syst=True,\n seeg=False, bio=True, ecog=False, fnirs=False, dbs=False,\n exclude=())\n if len(misc) and np.in1d(misc, order).any():\n selections_dict['Misc'] = misc\n return selections_dict\n",
"path": "mne/viz/raw.py"
}
] | diff --git a/mne/viz/raw.py b/mne/viz/raw.py
index 5f238139a68..07fe8aeac74 100644
--- a/mne/viz/raw.py
+++ b/mne/viz/raw.py
@@ -351,7 +351,8 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor=bgcolor,
# Qt-specific
precompute=precompute,
- use_opengl=use_opengl)
+ use_opengl=use_opengl,
+ theme=theme)
fig = _get_browser(show=show, block=block, **params)
|
conda__conda-5009 | When lacking permissions to write, clone message should quote prefix.
When trying to install a new package into a location for which the user lacks write permissions (a read-only root), conda helpfully suggests cloning the environment into a new location:
```
CondaIOError: IO error: Missing write permissions in: C:\Program Files\Anaconda
#
# You don't appear to have the necessary permissions to install packages
# into the install area 'C:\Program Files\Anaconda'.
# However you can clone this environment into your home directory and
# then make changes to it.
# This may be done using the command:
#
# $ conda create -n my_deathstar --clone=C:\Program Files\Anaconda\envs\deathstar
```
As shown in the example above, the clone path may include spaces. This will be particularly common on Windows, where a global install puts files under Program Files, a directory that a non-administrator user cannot write to and whose name contains a space. Because the suggested command includes the prefix path, the prefix should be quoted to guard against this case.
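For illustration, a minimal sketch (the environment name and path here are made up) of how quoting keeps a prefix with spaces intact in the suggested command:
```python
# Sketch only: build the suggested clone command with and without quoting.
# An unquoted path containing spaces is split into several shell arguments;
# the quoted form stays a single --clone value.
prefix = r"C:\Program Files\Anaconda\envs\deathstar"
name = "deathstar"

unquoted = f"conda create -n my_{name} --clone={prefix}"
quoted = f'conda create -n my_{name} --clone="{prefix}"'

print(unquoted)  # the shell would see '--clone=C:\Program' plus stray arguments
print(quoted)    # the shell sees one quoted --clone argument
```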
| [
{
"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import join\n\nfrom .common import name_prefix\nfrom ..base.context import context\nfrom ..exceptions import CondaIOError\n\n\ndef read_message(fn):\n res = []\n for envs_dir in context.envs_dirs:\n path = join(envs_dir, '.conda-help', fn)\n try:\n with open(path) as fi:\n s = fi.read().decode('utf-8')\n s = s.replace('${envs_dir}', envs_dir)\n res.append(s)\n except IOError:\n pass\n return ''.join(res)\n\n\ndef root_read_only(command, prefix, json=False):\n assert command in {'install', 'update', 'remove'}\n\n msg = read_message('ro.txt')\n if not msg:\n msg = \"\"\"\\\nMissing write permissions in: ${root_dir}\n#\n# You don't appear to have the necessary permissions to ${command} packages\n# into the install area '${root_dir}'.\n# However you can clone this environment into your home directory and\n# then make changes to it.\n# This may be done using the command:\n#\n# $ conda create -n my_${name} --clone=${prefix}\n\"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n msg = msg.replace('${name}', name_prefix(prefix))\n msg = msg.replace('${command}', command)\n raise CondaIOError(msg)\n",
"path": "conda/cli/help.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import join\n\nfrom .common import name_prefix\nfrom ..base.context import context\nfrom ..exceptions import CondaIOError\n\n\ndef read_message(fn):\n res = []\n for envs_dir in context.envs_dirs:\n path = join(envs_dir, '.conda-help', fn)\n try:\n with open(path) as fi:\n s = fi.read().decode('utf-8')\n s = s.replace('${envs_dir}', envs_dir)\n res.append(s)\n except IOError:\n pass\n return ''.join(res)\n\n\ndef root_read_only(command, prefix, json=False):\n assert command in {'install', 'update', 'remove'}\n\n msg = read_message('ro.txt')\n if not msg:\n msg = \"\"\"\\\nMissing write permissions in: ${root_dir}\n#\n# You don't appear to have the necessary permissions to ${command} packages\n# into the install area '${root_dir}'.\n# However you can clone this environment into your home directory and\n# then make changes to it.\n# This may be done using the command:\n#\n# $ conda create -n my_${name} --clone=\"${prefix}\"\n\"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n msg = msg.replace('${name}', name_prefix(prefix))\n msg = msg.replace('${command}', command)\n raise CondaIOError(msg)\n",
"path": "conda/cli/help.py"
}
] | diff --git a/conda/cli/help.py b/conda/cli/help.py
index 855c2d3ea7d..278681a4670 100644
--- a/conda/cli/help.py
+++ b/conda/cli/help.py
@@ -35,7 +35,7 @@ def root_read_only(command, prefix, json=False):
# then make changes to it.
# This may be done using the command:
#
-# $ conda create -n my_${name} --clone=${prefix}
+# $ conda create -n my_${name} --clone="${prefix}"
"""
msg = msg.replace('${root_dir}', context.root_prefix)
msg = msg.replace('${prefix}', prefix)
|
microsoft__torchgeo-1433 | USAVars Augmentation maps to 0
### Description
In the USAVars Datamodule, the default augmentation from NonGeoDatamodule is used. However, the dataset returns uint8 data, and it comes out of the augmentation still as uint8. This means you not only get an error when trying to train, but your input images are also all zeros.
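For context, a minimal sketch (the array contents are made up) of the dtype difference involved: the dataset currently hands the augmentation a `torch.uint8` tensor, whereas casting to float at load time gives the normalization the floating-point input it expects:
```python
import numpy as np
import torch

# Hypothetical 4-channel uint8 image, shaped like what the dataset returns.
array = np.random.randint(0, 256, size=(4, 8, 8), dtype=np.uint8)

as_uint8 = torch.from_numpy(array)           # dtype torch.uint8: passes through the default aug unchanged
as_float = torch.from_numpy(array).float()   # dtype torch.float32: what the normalization expects

print(as_uint8.dtype, as_float.dtype)  # torch.uint8 torch.float32
```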
### Steps to reproduce
```
dm = USAVarsDataModule(root="path/to/usa_vars", batch_size=16)
dm.setup("fit")
dl = dm.train_dataloader()
batch = next(iter(dl))
aug_batch = dm.aug(batch)
print(aug_batch["image"].max())
```
### Version
'0.5.0.dev0'
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"USAVars dataset.\"\"\"\n\nimport glob\nimport os\nfrom collections.abc import Sequence\nfrom typing import Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rasterio\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import download_url, extract_archive\n\n\nclass USAVars(NonGeoDataset):\n \"\"\"USAVars dataset.\n\n The USAVars dataset is reproduction of the dataset used in the paper \"`A\n generalizable and accessible approach to machine learning with global satellite\n imagery <https://doi.org/10.1038/s41467-021-24638-z>`_\". Specifically, this dataset\n includes 1 sq km. crops of NAIP imagery resampled to 4m/px cenetered on ~100k points\n that are sampled randomly from the contiguous states in the USA. Each point contains\n three continuous valued labels (taken from the dataset released in the paper): tree\n cover percentage, elevation, and population density.\n\n Dataset format:\n\n * images are 4-channel GeoTIFFs\n * labels are singular float values\n\n Dataset labels:\n\n * tree cover\n * elevation\n * population density\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://doi.org/10.1038/s41467-021-24638-z\n\n .. versionadded:: 0.3\n \"\"\"\n\n url_prefix = (\n \"https://files.codeocean.com/files/verified/\"\n + \"fa908bbc-11f9-4421-8bd3-72a4bf00427f_v2.0/data/int/applications\"\n )\n pop_csv_suffix = \"CONTUS_16_640_POP_100000_0.csv?download\"\n uar_csv_suffix = \"CONTUS_16_640_UAR_100000_0.csv?download\"\n\n data_url = \"https://mosaiks.blob.core.windows.net/datasets/uar.zip\"\n dirname = \"uar\"\n\n md5 = \"677e89fd20e5dd0fe4d29b61827c2456\"\n\n label_urls = {\n \"housing\": f\"{url_prefix}/housing/outcomes_sampled_housing_{pop_csv_suffix}\",\n \"income\": f\"{url_prefix}/income/outcomes_sampled_income_{pop_csv_suffix}\",\n \"roads\": f\"{url_prefix}/roads/outcomes_sampled_roads_{pop_csv_suffix}\",\n \"nightlights\": f\"{url_prefix}/nightlights/\"\n + f\"outcomes_sampled_nightlights_{pop_csv_suffix}\",\n \"population\": f\"{url_prefix}/population/\"\n + f\"outcomes_sampled_population_{uar_csv_suffix}\",\n \"elevation\": f\"{url_prefix}/elevation/\"\n + f\"outcomes_sampled_elevation_{uar_csv_suffix}\",\n \"treecover\": f\"{url_prefix}/treecover/\"\n + f\"outcomes_sampled_treecover_{uar_csv_suffix}\",\n }\n\n split_metadata = {\n \"train\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/train_split.txt\",\n \"filename\": \"train_split.txt\",\n \"md5\": \"3f58fffbf5fe177611112550297200e7\",\n },\n \"val\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/val_split.txt\",\n \"filename\": \"val_split.txt\",\n \"md5\": \"bca7183b132b919dec0fc24fb11662a0\",\n },\n \"test\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/test_split.txt\",\n \"filename\": \"test_split.txt\",\n \"md5\": \"97bb36bc003ae0bf556a8d6e8f77141a\",\n },\n }\n\n ALL_LABELS = [\"treecover\", \"elevation\", \"population\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n labels: Sequence[str] = ALL_LABELS,\n transforms: Optional[Callable[[dict[str, Tensor]], dict[str, Tensor]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new USAVars dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: train/val/test split 
to load\n labels: list of labels to include\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if invalid labels are provided\n ImportError: if pandas is not installed\n RuntimeError: if ``download=False`` and data is not found, or checksums\n don't match\n \"\"\"\n self.root = root\n\n assert split in self.split_metadata\n self.split = split\n\n for lab in labels:\n assert lab in self.ALL_LABELS\n\n self.labels = labels\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n\n try:\n import pandas as pd # noqa: F401\n except ImportError:\n raise ImportError(\n \"pandas is not installed and is required to use this dataset\"\n )\n\n self.files = self._load_files()\n\n self.label_dfs = {\n lab: pd.read_csv(os.path.join(self.root, lab + \".csv\"), index_col=\"ID\")\n for lab in self.labels\n }\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n tif_file = self.files[index]\n id_ = tif_file[5:-4]\n\n sample = {\n \"labels\": Tensor(\n [self.label_dfs[lab].loc[id_][lab] for lab in self.labels]\n ),\n \"image\": self._load_image(os.path.join(self.root, \"uar\", tif_file)),\n \"centroid_lat\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lat\"]]),\n \"centroid_lon\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lon\"]]),\n }\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.files)\n\n def _load_files(self) -> list[str]:\n \"\"\"Loads file names.\"\"\"\n with open(os.path.join(self.root, f\"{self.split}_split.txt\")) as f:\n files = f.read().splitlines()\n return files\n\n def _load_image(self, path: str) -> Tensor:\n \"\"\"Load a single image.\n\n Args:\n path: path to the image\n\n Returns:\n the image\n \"\"\"\n with rasterio.open(path) as f:\n array: \"np.typing.NDArray[np.int_]\" = f.read()\n tensor = torch.from_numpy(array)\n return tensor\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, \"uar\")\n csv_pathname = os.path.join(self.root, \"*.csv\")\n split_pathname = os.path.join(self.root, \"*_split.txt\")\n\n csv_split_count = (len(glob.glob(csv_pathname)), len(glob.glob(split_pathname)))\n if glob.glob(pathname) and csv_split_count == (7, 3):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dirname + \".zip\")\n if glob.glob(pathname) and csv_split_count == (7, 3):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n for 
f_name in self.label_urls:\n download_url(self.label_urls[f_name], self.root, filename=f_name + \".csv\")\n\n download_url(self.data_url, self.root, md5=self.md5 if self.checksum else None)\n\n for metadata in self.split_metadata.values():\n download_url(\n metadata[\"url\"],\n self.root,\n md5=metadata[\"md5\"] if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n extract_archive(os.path.join(self.root, self.dirname + \".zip\"))\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_labels: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`__getitem__`\n show_labels: flag indicating whether to show labels above panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"][:3].numpy() # get RGB inds\n image = np.moveaxis(image, 0, 2)\n\n fig, axs = plt.subplots(figsize=(10, 10))\n axs.imshow(image)\n axs.axis(\"off\")\n\n if show_labels:\n labels = [(lab, val) for lab, val in sample.items() if lab != \"image\"]\n label_string = \"\"\n for lab, val in labels:\n label_string += f\"{lab}={round(val[0].item(), 2)} \"\n axs.set_title(label_string)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n",
"path": "torchgeo/datasets/usavars.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"USAVars dataset.\"\"\"\n\nimport glob\nimport os\nfrom collections.abc import Sequence\nfrom typing import Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rasterio\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import download_url, extract_archive\n\n\nclass USAVars(NonGeoDataset):\n \"\"\"USAVars dataset.\n\n The USAVars dataset is reproduction of the dataset used in the paper \"`A\n generalizable and accessible approach to machine learning with global satellite\n imagery <https://doi.org/10.1038/s41467-021-24638-z>`_\". Specifically, this dataset\n includes 1 sq km. crops of NAIP imagery resampled to 4m/px cenetered on ~100k points\n that are sampled randomly from the contiguous states in the USA. Each point contains\n three continuous valued labels (taken from the dataset released in the paper): tree\n cover percentage, elevation, and population density.\n\n Dataset format:\n\n * images are 4-channel GeoTIFFs\n * labels are singular float values\n\n Dataset labels:\n\n * tree cover\n * elevation\n * population density\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://doi.org/10.1038/s41467-021-24638-z\n\n .. versionadded:: 0.3\n \"\"\"\n\n url_prefix = (\n \"https://files.codeocean.com/files/verified/\"\n + \"fa908bbc-11f9-4421-8bd3-72a4bf00427f_v2.0/data/int/applications\"\n )\n pop_csv_suffix = \"CONTUS_16_640_POP_100000_0.csv?download\"\n uar_csv_suffix = \"CONTUS_16_640_UAR_100000_0.csv?download\"\n\n data_url = \"https://mosaiks.blob.core.windows.net/datasets/uar.zip\"\n dirname = \"uar\"\n\n md5 = \"677e89fd20e5dd0fe4d29b61827c2456\"\n\n label_urls = {\n \"housing\": f\"{url_prefix}/housing/outcomes_sampled_housing_{pop_csv_suffix}\",\n \"income\": f\"{url_prefix}/income/outcomes_sampled_income_{pop_csv_suffix}\",\n \"roads\": f\"{url_prefix}/roads/outcomes_sampled_roads_{pop_csv_suffix}\",\n \"nightlights\": f\"{url_prefix}/nightlights/\"\n + f\"outcomes_sampled_nightlights_{pop_csv_suffix}\",\n \"population\": f\"{url_prefix}/population/\"\n + f\"outcomes_sampled_population_{uar_csv_suffix}\",\n \"elevation\": f\"{url_prefix}/elevation/\"\n + f\"outcomes_sampled_elevation_{uar_csv_suffix}\",\n \"treecover\": f\"{url_prefix}/treecover/\"\n + f\"outcomes_sampled_treecover_{uar_csv_suffix}\",\n }\n\n split_metadata = {\n \"train\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/train_split.txt\",\n \"filename\": \"train_split.txt\",\n \"md5\": \"3f58fffbf5fe177611112550297200e7\",\n },\n \"val\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/val_split.txt\",\n \"filename\": \"val_split.txt\",\n \"md5\": \"bca7183b132b919dec0fc24fb11662a0\",\n },\n \"test\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/test_split.txt\",\n \"filename\": \"test_split.txt\",\n \"md5\": \"97bb36bc003ae0bf556a8d6e8f77141a\",\n },\n }\n\n ALL_LABELS = [\"treecover\", \"elevation\", \"population\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n labels: Sequence[str] = ALL_LABELS,\n transforms: Optional[Callable[[dict[str, Tensor]], dict[str, Tensor]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new USAVars dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: train/val/test split 
to load\n labels: list of labels to include\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if invalid labels are provided\n ImportError: if pandas is not installed\n RuntimeError: if ``download=False`` and data is not found, or checksums\n don't match\n \"\"\"\n self.root = root\n\n assert split in self.split_metadata\n self.split = split\n\n for lab in labels:\n assert lab in self.ALL_LABELS\n\n self.labels = labels\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n\n try:\n import pandas as pd # noqa: F401\n except ImportError:\n raise ImportError(\n \"pandas is not installed and is required to use this dataset\"\n )\n\n self.files = self._load_files()\n\n self.label_dfs = {\n lab: pd.read_csv(os.path.join(self.root, lab + \".csv\"), index_col=\"ID\")\n for lab in self.labels\n }\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n tif_file = self.files[index]\n id_ = tif_file[5:-4]\n\n sample = {\n \"labels\": Tensor(\n [self.label_dfs[lab].loc[id_][lab] for lab in self.labels]\n ),\n \"image\": self._load_image(os.path.join(self.root, \"uar\", tif_file)),\n \"centroid_lat\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lat\"]]),\n \"centroid_lon\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lon\"]]),\n }\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.files)\n\n def _load_files(self) -> list[str]:\n \"\"\"Loads file names.\"\"\"\n with open(os.path.join(self.root, f\"{self.split}_split.txt\")) as f:\n files = f.read().splitlines()\n return files\n\n def _load_image(self, path: str) -> Tensor:\n \"\"\"Load a single image.\n\n Args:\n path: path to the image\n\n Returns:\n the image\n \"\"\"\n with rasterio.open(path) as f:\n array: \"np.typing.NDArray[np.int_]\" = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, \"uar\")\n csv_pathname = os.path.join(self.root, \"*.csv\")\n split_pathname = os.path.join(self.root, \"*_split.txt\")\n\n csv_split_count = (len(glob.glob(csv_pathname)), len(glob.glob(split_pathname)))\n if glob.glob(pathname) and csv_split_count == (7, 3):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dirname + \".zip\")\n if glob.glob(pathname) and csv_split_count == (7, 3):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n 
for f_name in self.label_urls:\n download_url(self.label_urls[f_name], self.root, filename=f_name + \".csv\")\n\n download_url(self.data_url, self.root, md5=self.md5 if self.checksum else None)\n\n for metadata in self.split_metadata.values():\n download_url(\n metadata[\"url\"],\n self.root,\n md5=metadata[\"md5\"] if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n extract_archive(os.path.join(self.root, self.dirname + \".zip\"))\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_labels: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`__getitem__`\n show_labels: flag indicating whether to show labels above panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"][:3].numpy() # get RGB inds\n image = np.moveaxis(image, 0, 2)\n\n fig, axs = plt.subplots(figsize=(10, 10))\n axs.imshow(image)\n axs.axis(\"off\")\n\n if show_labels:\n labels = [(lab, val) for lab, val in sample.items() if lab != \"image\"]\n label_string = \"\"\n for lab, val in labels:\n label_string += f\"{lab}={round(val[0].item(), 2)} \"\n axs.set_title(label_string)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n",
"path": "torchgeo/datasets/usavars.py"
}
] | diff --git a/torchgeo/datasets/usavars.py b/torchgeo/datasets/usavars.py
index b27bffaaf4b..43369dfdcab 100644
--- a/torchgeo/datasets/usavars.py
+++ b/torchgeo/datasets/usavars.py
@@ -200,7 +200,7 @@ def _load_image(self, path: str) -> Tensor:
"""
with rasterio.open(path) as f:
array: "np.typing.NDArray[np.int_]" = f.read()
- tensor = torch.from_numpy(array)
+ tensor = torch.from_numpy(array).float()
return tensor
def _verify(self) -> None:
|
gammapy__gammapy-5254 | FluxPoints.write() is ignoring overwrite when file extension is not FITS
**Gammapy version**
1.2
**Bug description**
When writing a `FluxPoints` table (_.ecsv_) to file, it is not possible to overwrite it.
**Expected behavior**
The `overwrite` argument should be taken into account.
**To Reproduce**
From any analysis get a `FluxPoints` instance (in my case it was obtained from `LightCurveEstimator.run()`)
and call the `write()` method with `overwrite=True`.
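A sketch of those reproduction steps (the input file here is an assumption for illustration; any `FluxPoints` instance written to a non-FITS extension behaves the same):
```python
from gammapy.estimators import FluxPoints

fp = FluxPoints.read("$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits")
fp.write("flux_points.ecsv")                  # first write succeeds (file did not exist yet)
fp.write("flux_points.ecsv", overwrite=True)  # raises OSError even though overwrite=True
```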
You will get the following error:
`OSError: File xxxxxx.ecsv already exists. If you mean to replace it then use the argument "overwrite=True".`
**Other information**
The bug is here: https://github.com/gammapy/gammapy/blob/2475a006e02ac1497eccc362de61e68a5f7a10eb/gammapy/estimators/points/core.py#L270
It should be sufficient to pass the `overwrite` argument to `table.write()`, because the default (inherited from `astropy.table.Table.write()`) is `False`.
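For illustration, a minimal standalone sketch (file and column names are made up) of the astropy behaviour behind this: `astropy.table.Table.write()` refuses to overwrite an existing file unless `overwrite=True` is forwarded, which is exactly what the non-FITS branch of `FluxPoints.write()` currently fails to do:
```python
from astropy.table import Table

# Hypothetical table standing in for the flux points serialization.
t = Table({"e_ref": [1.0, 2.0], "dnde": [1e-12, 5e-13]})
t.write("fluxpoints_demo.ecsv", format="ascii.ecsv")

# Without overwrite=True this second call raises OSError -- the same error the
# reporter sees; forwarding the method's `overwrite` flag to table.write() fixes it.
t.write("fluxpoints_demo.ecsv", format="ascii.ecsv", overwrite=True)
```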
| [
{
"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nfrom copy import deepcopy\nimport numpy as np\nfrom scipy import stats\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import minimize\nfrom astropy.io import fits\nfrom astropy.io.registry import IORegistryError\nfrom astropy.table import Table, vstack\nfrom astropy.time import Time\nfrom astropy.visualization import quantity_support\nimport matplotlib.pyplot as plt\nfrom gammapy.data import GTI\nfrom gammapy.maps import Map, MapAxis, Maps, RegionNDMap, TimeMapAxis\nfrom gammapy.maps.axes import UNIT_STRING_FORMAT, flat_if_equal\nfrom gammapy.modeling.models import TemplateSpectralModel\nfrom gammapy.modeling.models.spectral import scale_plot_flux\nfrom gammapy.modeling.scipy import stat_profile_ul_scipy\nfrom gammapy.utils.interpolation import interpolate_profile\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.table import table_standardise_units_copy\nfrom gammapy.utils.time import time_ref_to_dict\nfrom ..map.core import DEFAULT_UNIT, FluxMaps\n\n__all__ = [\"FluxPoints\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef squash_fluxpoints(flux_point, axis):\n \"\"\"Squash a FluxPoints object into one point.\n The log-likelihoods profiles in each bin are summed\n to compute the resultant quantities. Stat profiles\n must be present on the fluxpoints object for\n this method to work.\n \"\"\"\n\n value_scan = flux_point.stat_scan.geom.axes[\"norm\"].center\n stat_scan = np.sum(flux_point.stat_scan.data, axis=0).ravel()\n f = interp1d(value_scan, stat_scan, kind=\"quadratic\", bounds_error=False)\n f = interpolate_profile(value_scan, stat_scan)\n minimizer = minimize(\n f,\n x0=value_scan[int(len(value_scan) / 2)],\n bounds=[(value_scan[0], value_scan[-1])],\n method=\"L-BFGS-B\",\n )\n\n maps = Maps()\n geom = flux_point.geom.to_image()\n if axis.name != \"energy\":\n geom = geom.to_cube([flux_point.geom.axes[\"energy\"]])\n\n maps[\"norm\"] = Map.from_geom(geom, data=minimizer.x)\n maps[\"norm_err\"] = Map.from_geom(geom, data=np.sqrt(minimizer.hess_inv.todense()))\n maps[\"n_dof\"] = Map.from_geom(geom, data=flux_point.geom.axes[axis.name].nbin)\n\n if \"norm_ul\" in flux_point.available_quantities:\n delta_ts = flux_point.meta.get(\"n_sigma_ul\", 2) ** 2\n ul = stat_profile_ul_scipy(value_scan, stat_scan, delta_ts=delta_ts)\n maps[\"norm_ul\"] = Map.from_geom(geom, data=ul.value)\n\n maps[\"stat\"] = Map.from_geom(geom, data=f(minimizer.x))\n\n maps[\"stat_scan\"] = Map.from_geom(\n geom=geom.to_cube([MapAxis.from_nodes(value_scan, name=\"norm\")]), data=stat_scan\n )\n try:\n maps[\"stat_null\"] = Map.from_geom(geom, data=np.sum(flux_point.stat_null.data))\n maps[\"ts\"] = maps[\"stat_null\"] - maps[\"stat\"]\n except AttributeError:\n log.info(\n \"Stat null info not present on original FluxPoints object. 
TS not computed\"\n )\n\n maps[\"success\"] = Map.from_geom(geom=geom, data=minimizer.success, dtype=bool)\n\n combined_fp = FluxPoints.from_maps(\n maps=maps,\n sed_type=flux_point.sed_type_init,\n reference_model=flux_point.reference_model,\n gti=flux_point.gti,\n meta=flux_point.meta,\n )\n return combined_fp\n\n\nclass FluxPoints(FluxMaps):\n \"\"\"Flux points container.\n\n The supported formats are described here: :ref:`gadf:flux-points`.\n\n In summary, the following formats and minimum required columns are:\n\n * Format ``dnde``: columns ``e_ref`` and ``dnde``\n * Format ``e2dnde``: columns ``e_ref``, ``e2dnde``\n * Format ``flux``: columns ``e_min``, ``e_max``, ``flux``\n * Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with flux point data.\n\n Attributes\n ----------\n table : `~astropy.table.Table`\n Table with flux point data.\n\n Examples\n --------\n The `FluxPoints` object is most easily created by reading a file with\n flux points given in one of the formats documented above::\n\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> flux_points.plot() #doctest: +SKIP\n\n An instance of `FluxPoints` can also be created by passing an instance of\n `astropy.table.Table`, which contains the required columns, such as `'e_ref'`\n and `'dnde'`. The corresponding `sed_type` has to be defined in the meta data\n of the table::\n\n >>> import numpy as np\n >>> from astropy import units as u\n >>> from astropy.table import Table\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.modeling.models import PowerLawSpectralModel\n >>> table = Table()\n >>> pwl = PowerLawSpectralModel()\n >>> e_ref = np.geomspace(1, 100, 7) * u.TeV\n >>> table[\"e_ref\"] = e_ref\n >>> table[\"dnde\"] = pwl(e_ref)\n >>> table[\"dnde_err\"] = pwl.evaluate_error(e_ref)[0]\n >>> table.meta[\"SED_TYPE\"] = \"dnde\"\n >>> flux_points = FluxPoints.from_table(table)\n >>> flux_points.plot(sed_type=\"flux\") #doctest: +SKIP\n\n If you have flux points in a different data format, the format can be changed\n by renaming the table columns and adding meta data::\n\n\n >>> from astropy import units as u\n >>> from astropy.table import Table\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.utils.scripts import make_path\n\n >>> filename = make_path('$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points_ctb_37b.txt')\n >>> table = Table.read(filename ,format='ascii.csv', delimiter=' ', comment='#')\n >>> table.rename_column('Differential_Flux', 'dnde')\n >>> table['dnde'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('lower_error', 'dnde_errn')\n >>> table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('upper_error', 'dnde_errp')\n >>> table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('E', 'e_ref')\n >>> table['e_ref'].unit = 'TeV'\n\n >>> flux_points = FluxPoints.from_table(table, sed_type=\"dnde\")\n >>> flux_points.plot(sed_type=\"e2dnde\") #doctest: +SKIP\n\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n\n @classmethod\n def read(\n cls,\n filename,\n sed_type=None,\n format=None,\n reference_model=None,\n checksum=False,\n **kwargs,\n ):\n \"\"\"Read precomputed flux points.\n\n Parameters\n ----------\n filename : 
str\n Filename.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}\n SED type.\n format : {\"gadf-sed\", \"lightcurve\", \"profile\"}, optional\n Format string. If None, the format is extracted from the input.\n Default is None.\n reference_model : `SpectralModel`\n Reference spectral model.\n checksum : bool\n If True checks both DATASUM and CHECKSUM cards in the file headers. Default is False.\n **kwargs : dict, optional\n Keyword arguments passed to `astropy.table.Table.read`.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points.\n \"\"\"\n filename = make_path(filename)\n gti = None\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n try:\n table = Table.read(filename, **kwargs)\n except (IORegistryError, UnicodeDecodeError):\n with fits.open(filename, checksum=checksum) as hdulist:\n if \"FLUXPOINTS\" in hdulist:\n fp = hdulist[\"FLUXPOINTS\"]\n else:\n fp = hdulist[\"\"] # to handle older files\n table = Table.read(fp)\n if \"GTI\" in hdulist:\n gti = GTI.from_table_hdu(hdulist[\"GTI\"])\n\n return cls.from_table(\n table=table,\n sed_type=sed_type,\n reference_model=reference_model,\n format=format,\n gti=gti,\n )\n\n def write(\n self, filename, sed_type=None, format=None, overwrite=False, checksum=False\n ):\n \"\"\"Write flux points.\n\n Parameters\n ----------\n filename : str\n Filename.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}, optional\n SED type. Default is None.\n format : {\"gadf-sed\", \"lightcurve\", \"binned-time-series\", \"profile\"}, optional\n Format specification. The following formats are supported:\n\n * \"gadf-sed\": format for SED flux points see :ref:`gadf:flux-points`\n for details\n * \"lightcurve\": Gammapy internal format to store energy dependent\n lightcurves. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n * \"binned-time-series\": table format support by Astropy's\n `~astropy.timeseries.BinnedTimeSeries`.\n * \"profile\": Gammapy internal format to store energy dependent\n flux profiles. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n\n If None, the format will be guessed by looking at the axes that are present in the object.\n Default is None.\n\n overwrite : bool, optional\n Overwrite existing file. 
Default is False.\n checksum : bool, optional\n When True adds both DATASUM and CHECKSUM cards to the headers written to the file.\n Default is False.\n \"\"\"\n filename = make_path(filename)\n\n if sed_type is None:\n sed_type = self.sed_type_init\n table = self.to_table(sed_type=sed_type, format=format)\n\n if \".fits\" not in filename.suffixes:\n table.write(filename)\n return\n\n primary_hdu = fits.PrimaryHDU()\n hdu_evt = fits.BinTableHDU(table, name=\"FLUXPOINTS\")\n hdu_all = fits.HDUList([primary_hdu, hdu_evt])\n if self.gti:\n hdu_all.append(self.gti.to_table_hdu())\n\n hdu_all.writeto(filename, overwrite=overwrite, checksum=checksum)\n\n @staticmethod\n def _convert_loglike_columns(table):\n # TODO: check sign and factor 2 here\n # https://github.com/gammapy/gammapy/pull/2546#issuecomment-554274318\n # The idea below is to support the format here:\n # https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html#likelihood-columns\n # but internally to go to the uniform \"stat\"\n\n if \"loglike\" in table.colnames and \"stat\" not in table.colnames:\n table[\"stat\"] = 2 * table[\"loglike\"]\n\n if \"loglike_null\" in table.colnames and \"stat_null\" not in table.colnames:\n table[\"stat_null\"] = 2 * table[\"loglike_null\"]\n\n if \"dloglike_scan\" in table.colnames and \"stat_scan\" not in table.colnames:\n table[\"stat_scan\"] = 2 * table[\"dloglike_scan\"]\n\n return table\n\n @staticmethod\n def _table_guess_format(table):\n \"\"\"Format of the table to be transformed to FluxPoints.\"\"\"\n names = table.colnames\n if \"time_min\" in names:\n return \"lightcurve\"\n elif \"x_min\" in names:\n return \"profile\"\n else:\n return \"gadf-sed\"\n\n @classmethod\n def from_table(\n cls, table, sed_type=None, format=None, reference_model=None, gti=None\n ):\n \"\"\"Create flux points from a table. The table column names must be consistent with the\n sed_type.\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}, optional\n SED type. Default is None.\n format : {\"gadf-sed\", \"lightcurve\", \"profile\"}, optional\n Table format. If None, it is extracted from the table column content. Default is None.\n reference_model : `SpectralModel`, optional\n Reference spectral model. Default is None.\n gti : `GTI`, optional\n Good time intervals. 
Default is None.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points.\n \"\"\"\n table = table_standardise_units_copy(table)\n\n if format is None:\n format = cls._table_guess_format(table)\n log.info(\"Inferred format: \" + format)\n\n if sed_type is None:\n sed_type = table.meta.get(\"SED_TYPE\", None)\n\n if sed_type is None:\n sed_type = cls._guess_sed_type(table.colnames)\n\n if sed_type is None:\n raise ValueError(\"Specifying the SED type is required\")\n\n if sed_type == \"likelihood\":\n table = cls._convert_loglike_columns(table)\n if reference_model is None:\n reference_model = TemplateSpectralModel(\n energy=flat_if_equal(table[\"e_ref\"].quantity),\n values=flat_if_equal(table[\"ref_dnde\"].quantity),\n )\n\n maps = Maps()\n table.meta.setdefault(\"SED_TYPE\", sed_type)\n\n for name in cls.all_quantities(sed_type=sed_type):\n if name in table.colnames:\n maps[name] = RegionNDMap.from_table(\n table=table, colname=name, format=format\n )\n\n meta = cls._get_meta_gadf(table)\n return cls.from_maps(\n maps=maps,\n reference_model=reference_model,\n meta=meta,\n sed_type=sed_type,\n gti=gti,\n )\n\n @staticmethod\n def _get_meta_gadf(table):\n meta = {}\n meta.update(table.meta)\n conf_ul = table.meta.get(\"UL_CONF\")\n if conf_ul:\n n_sigma_ul = np.round(stats.norm.isf(0.5 * (1 - conf_ul)), 1)\n meta[\"n_sigma_ul\"] = n_sigma_ul\n meta[\"sed_type_init\"] = table.meta.get(\"SED_TYPE\")\n return meta\n\n @staticmethod\n def _format_table(table):\n \"\"\"Format table.\"\"\"\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n def _guess_format(self):\n \"\"\"Format of the FluxPoints object.\"\"\"\n names = self.geom.axes.names\n if \"time\" in names:\n return \"lightcurve\"\n elif \"projected-distance\" in names:\n return \"profile\"\n else:\n return \"gadf-sed\"\n\n def to_table(self, sed_type=None, format=None, formatted=False):\n \"\"\"Create table for a given SED type.\n\n Parameters\n ----------\n sed_type : {\"likelihood\", \"dnde\", \"e2dnde\", \"flux\", \"eflux\"}\n SED type to convert to. Default is `likelihood`.\n format : {\"gadf-sed\", \"lightcurve\", \"binned-time-series\", \"profile\"}, optional\n Format specification. The following formats are supported:\n\n * \"gadf-sed\": format for SED flux points see :ref:`gadf:flux-points`\n for details\n * \"lightcurve\": Gammapy internal format to store energy dependent\n lightcurves. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n * \"binned-time-series\": table format support by Astropy's\n `~astropy.timeseries.BinnedTimeSeries`.\n * \"profile\": Gammapy internal format to store energy dependent\n flux profiles. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n\n If None, the format will be guessed by looking at the axes that are present in the object.\n Default is None.\n\n formatted : bool\n Formatted version with column formats applied. Numerical columns are\n formatted to .3f and .3e respectively. 
Default is False.\n\n Returns\n -------\n table : `~astropy.table.Table`\n Flux points table.\n\n Examples\n --------\n\n This is how to read and plot example flux points:\n\n >>> from gammapy.estimators import FluxPoints\n >>> fp = FluxPoints.read(\"$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits\")\n >>> table = fp.to_table(sed_type=\"flux\", formatted=True)\n >>> print(table[:2])\n e_ref e_min e_max flux flux_err flux_ul ts sqrt_ts is_ul\n TeV TeV TeV 1 / (s cm2) 1 / (s cm2) 1 / (s cm2)\n ----- ----- ----- ----------- ----------- ----------- -------- ------- -----\n 1.334 1.000 1.780 1.423e-11 3.135e-13 nan 2734.000 52.288 False\n 2.372 1.780 3.160 5.780e-12 1.082e-13 nan 4112.000 64.125 False\n \"\"\"\n if sed_type is None:\n sed_type = self.sed_type_init\n\n if format is None:\n format = self._guess_format()\n log.info(\"Inferred format: \" + format)\n\n if format == \"gadf-sed\":\n # TODO: what to do with GTI info?\n if not self.geom.axes.names == [\"energy\"]:\n raise ValueError(\n \"Only flux points with a single energy axis \"\n \"can be converted to 'gadf-sed'\"\n )\n\n idx = (Ellipsis, 0, 0)\n table = self.energy_axis.to_table(format=\"gadf-sed\")\n table.meta[\"SED_TYPE\"] = sed_type\n\n if not self.is_convertible_to_flux_sed_type:\n table.remove_columns([\"e_min\", \"e_max\"])\n\n if self.n_sigma_ul:\n table.meta[\"UL_CONF\"] = np.round(\n 1 - 2 * stats.norm.sf(self.n_sigma_ul), 7\n )\n\n if sed_type == \"likelihood\":\n table[\"ref_dnde\"] = self.dnde_ref[idx]\n table[\"ref_flux\"] = self.flux_ref[idx]\n table[\"ref_eflux\"] = self.eflux_ref[idx]\n\n for quantity in self.all_quantities(sed_type=sed_type):\n data = getattr(self, quantity, None)\n if data:\n table[quantity] = data.quantity[idx]\n\n if self.has_stat_profiles:\n norm_axis = self.stat_scan.geom.axes[\"norm\"]\n table[\"norm_scan\"] = norm_axis.center.reshape((1, -1))\n table[\"stat\"] = self.stat.data[idx]\n table[\"stat_scan\"] = self.stat_scan.data[idx]\n\n table[\"is_ul\"] = self.is_ul.data[idx]\n if not self.has_ul:\n table.remove_columns(\"is_ul\")\n\n elif format == \"lightcurve\":\n time_axis = self.geom.axes[\"time\"]\n\n tables = []\n for idx, (time_min, time_max) in enumerate(time_axis.iter_by_edges):\n table_flat = Table()\n table_flat[\"time_min\"] = [time_min.mjd]\n table_flat[\"time_max\"] = [time_max.mjd]\n\n fp = self.slice_by_idx(slices={\"time\": idx})\n table = fp.to_table(sed_type=sed_type, format=\"gadf-sed\")\n\n for column in table.columns:\n table_flat[column] = table[column][np.newaxis]\n\n tables.append(table_flat)\n\n table = vstack(tables)\n\n # serialize with reference time set to mjd=0.0\n ref_time = Time(0.0, format=\"mjd\", scale=time_axis.reference_time.scale)\n table.meta.update(time_ref_to_dict(ref_time, scale=ref_time.scale))\n table.meta[\"TIMEUNIT\"] = \"d\"\n\n elif format == \"binned-time-series\":\n message = (\n \"Format 'binned-time-series' support a single time axis \"\n f\"only. 
Got {self.geom.axes.names}\"\n )\n\n if not self.geom.axes.is_unidimensional:\n raise ValueError(message)\n\n axis = self.geom.axes.primary_axis\n\n if not isinstance(axis, TimeMapAxis):\n raise ValueError(message)\n\n table = Table()\n table[\"time_bin_start\"] = axis.time_min\n table[\"time_bin_size\"] = axis.time_delta\n\n for quantity in self.all_quantities(sed_type=sed_type):\n data = getattr(self, quantity, None)\n if data:\n table[quantity] = data.quantity.squeeze()\n elif format == \"profile\":\n x_axis = self.geom.axes[\"projected-distance\"]\n\n tables = []\n for idx, (x_min, x_max) in enumerate(x_axis.iter_by_edges):\n table_flat = Table()\n table_flat[\"x_min\"] = [x_min]\n table_flat[\"x_max\"] = [x_max]\n table_flat[\"x_ref\"] = [(x_max + x_min) / 2]\n\n fp = self.slice_by_idx(slices={\"projected-distance\": idx})\n table = fp.to_table(sed_type=sed_type, format=\"gadf-sed\")\n\n for column in table.columns:\n table_flat[column] = table[column][np.newaxis]\n\n tables.append(table_flat)\n\n table = vstack(tables)\n\n else:\n raise ValueError(f\"Not a supported format {format}\")\n\n if formatted:\n table = self._format_table(table=table)\n\n return table\n\n @staticmethod\n def _energy_ref_lafferty(model, energy_min, energy_max):\n \"\"\"Helper for `to_sed_type`.\n\n Compute energy_ref that the value at energy_ref corresponds\n to the mean value between energy_min and energy_max.\n \"\"\"\n flux = model.integral(energy_min, energy_max)\n dnde_mean = flux / (energy_max - energy_min)\n return model.inverse(dnde_mean)\n\n def _plot_get_flux_err(self, sed_type=None):\n \"\"\"Compute flux error for given SED type\"\"\"\n y_errn, y_errp = None, None\n\n if \"norm_err\" in self.available_quantities:\n # symmetric error\n y_errn = getattr(self, sed_type + \"_err\")\n y_errp = y_errn.copy()\n\n if \"norm_errp\" in self.available_quantities:\n y_errn = getattr(self, sed_type + \"_errn\")\n y_errp = getattr(self, sed_type + \"_errp\")\n\n return y_errn, y_errp\n\n def plot(self, ax=None, sed_type=None, energy_power=0, time_format=\"iso\", **kwargs):\n \"\"\"Plot flux points.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis object to plot on. Default is None.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\"}, optional\n SED type. Default is None.\n energy_power : float, optional\n Power of energy to multiply flux axis with. Default is 0.\n time_format : {\"iso\", \"mjd\"}\n Used time format is a time axis is present. 
Default is \"iso\".\n **kwargs : dict, optional\n Keyword arguments passed to `~RegionNDMap.plot`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object.\n \"\"\"\n if sed_type is None:\n sed_type = self.sed_type_plot_default\n\n if not self.norm.geom.is_region:\n raise ValueError(\"Plotting only supported for region based flux points\")\n\n if ax is None:\n ax = plt.gca()\n\n flux_unit = DEFAULT_UNIT[sed_type]\n\n flux = getattr(self, sed_type)\n\n # get errors and ul\n y_errn, y_errp = self._plot_get_flux_err(sed_type=sed_type)\n is_ul = self.is_ul.data\n\n if self.has_ul and y_errn and is_ul.any():\n flux_ul = getattr(self, sed_type + \"_ul\").quantity\n y_errn.data[is_ul] = np.clip(\n 0.5 * flux_ul[is_ul].to_value(y_errn.unit), 0, np.inf\n )\n y_errp.data[is_ul] = 0\n flux.data[is_ul] = flux_ul[is_ul].to_value(flux.unit)\n kwargs.setdefault(\"uplims\", is_ul)\n\n # set flux points plotting defaults\n if y_errp and y_errn:\n y_errp = np.clip(\n scale_plot_flux(y_errp, energy_power=energy_power).quantity, 0, np.inf\n )\n y_errn = np.clip(\n scale_plot_flux(y_errn, energy_power=energy_power).quantity, 0, np.inf\n )\n kwargs.setdefault(\"yerr\", (y_errn, y_errp))\n else:\n kwargs.setdefault(\"yerr\", None)\n\n flux = scale_plot_flux(flux=flux.to_unit(flux_unit), energy_power=energy_power)\n if \"time\" in flux.geom.axes_names:\n flux.geom.axes[\"time\"].time_format = time_format\n ax = flux.plot(ax=ax, **kwargs)\n ax.set_ylabel(f\"{sed_type} [{ax.yaxis.units.to_string(UNIT_STRING_FORMAT)}]\")\n ax.set_yscale(\"log\")\n return ax\n\n def plot_ts_profiles(\n self,\n ax=None,\n sed_type=None,\n add_cbar=True,\n **kwargs,\n ):\n \"\"\"Plot fit statistic SED profiles as a density plot.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis object to plot on. Default is None.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\"}, optional\n SED type. Default is None.\n add_cbar : bool, optional\n Whether to add a colorbar to the plot. 
Default is True.\n **kwargs : dict, optional\n Keyword arguments passed to `~matplotlib.pyplot.pcolormesh`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object.\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if sed_type is None:\n sed_type = self.sed_type_plot_default\n\n if not self.norm.geom.is_region:\n raise ValueError(\"Plotting only supported for region based flux points\")\n\n if not self.geom.axes.is_unidimensional:\n raise ValueError(\n \"Profile plotting is only supported for unidimensional maps\"\n )\n\n axis = self.geom.axes.primary_axis\n\n if isinstance(axis, TimeMapAxis) and not axis.is_contiguous:\n axis = axis.to_contiguous()\n\n if ax.yaxis.units is None:\n yunits = DEFAULT_UNIT[sed_type]\n else:\n yunits = ax.yaxis.units\n\n ax.yaxis.set_units(yunits)\n\n flux_ref = getattr(self, sed_type + \"_ref\").to(yunits)\n\n ts = self.ts_scan\n\n norm_min, norm_max = ts.geom.axes[\"norm\"].bounds.to_value(\"\")\n\n flux = MapAxis.from_bounds(\n norm_min * flux_ref.value.min(),\n norm_max * flux_ref.value.max(),\n nbin=500,\n interp=axis.interp,\n unit=flux_ref.unit,\n )\n\n norm = flux.center / flux_ref.reshape((-1, 1))\n\n coords = ts.geom.get_coord()\n coords[\"norm\"] = norm\n coords[axis.name] = axis.center.reshape((-1, 1))\n\n z = ts.interp_by_coord(coords, values_scale=\"stat-profile\")\n\n kwargs.setdefault(\"vmax\", 0)\n kwargs.setdefault(\"vmin\", -4)\n kwargs.setdefault(\"zorder\", 0)\n kwargs.setdefault(\"cmap\", \"Blues\")\n kwargs.setdefault(\"linewidths\", 0)\n kwargs.setdefault(\"shading\", \"auto\")\n\n # clipped values are set to NaN so that they appear white on the plot\n z[-z < kwargs[\"vmin\"]] = np.nan\n\n with quantity_support():\n caxes = ax.pcolormesh(axis.as_plot_edges, flux.edges, -z.T, **kwargs)\n\n axis.format_plot_xaxis(ax=ax)\n\n ax.set_ylabel(f\"{sed_type} [{ax.yaxis.units.to_string(UNIT_STRING_FORMAT)}]\")\n ax.set_yscale(\"log\")\n\n if add_cbar:\n label = \"Fit statistic difference\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n def recompute_ul(self, n_sigma_ul=2, **kwargs):\n \"\"\"Recompute upper limits corresponding to the given value.\n The pre-computed statistic profiles must exist for the re-computation.\n\n Parameters\n ----------\n n_sigma_ul : int\n Number of sigma to use for upper limit computation. Default is 2.\n **kwargs : dict, optional\n Keyword arguments passed to `~scipy.optimize.brentq`.\n\n Returns\n -------\n flux_points : `~gammapy.estimators.FluxPoints`\n A new FluxPoints object with modified upper limits.\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> flux_points_recomputed = flux_points.recompute_ul(n_sigma_ul=3)\n >>> print(flux_points.meta[\"n_sigma_ul\"], flux_points.flux_ul.data[0])\n 2.0 [[3.95451985e-09]]\n >>> print(flux_points_recomputed.meta[\"n_sigma_ul\"], flux_points_recomputed.flux_ul.data[0])\n 3 [[6.22245374e-09]]\n \"\"\"\n\n if not self.has_stat_profiles:\n raise ValueError(\n \"Stat profiles not present. 
Upper limit computation is not possible\"\n )\n\n delta_ts = n_sigma_ul**2\n\n flux_points = deepcopy(self)\n\n value_scan = self.stat_scan.geom.axes[\"norm\"].center\n shape_axes = self.stat_scan.geom._shape[slice(3, None)][::-1]\n for idx in np.ndindex(shape_axes):\n stat_scan = np.abs(\n self.stat_scan.data[idx].squeeze() - self.stat.data[idx].squeeze()\n )\n flux_points.norm_ul.data[idx] = stat_profile_ul_scipy(\n value_scan, stat_scan, delta_ts=delta_ts, **kwargs\n )\n flux_points.meta[\"n_sigma_ul\"] = n_sigma_ul\n return flux_points\n\n def resample_axis(self, axis_new):\n \"\"\"Rebin the flux point object along the new axis.\n The log-likelihoods profiles in each bin are summed\n to compute the resultant quantities.\n Stat profiles must be present on the fluxpoints object for\n this method to work.\n\n For now, works only for flat fluxpoints.\n\n Parameters\n ----------\n axis_new : `MapAxis` or `TimeMapAxis`\n The new axis to resample along\n\n Returns\n -------\n flux_points : `~gammapy.estimators.FluxPoints`\n A new FluxPoints object with modified axis.\n \"\"\"\n\n if not self.has_stat_profiles:\n raise ValueError(\"Stat profiles not present, rebinning is not possible\")\n\n fluxpoints = []\n for edge_min, edge_max in zip(axis_new.edges_min, axis_new.edges_max):\n if isinstance(axis_new, TimeMapAxis):\n edge_min = edge_min + axis_new.reference_time\n edge_max = edge_max + axis_new.reference_time\n fp = self.slice_by_coord({axis_new.name: slice(edge_min, edge_max)})\n fp_new = squash_fluxpoints(fp, axis_new)\n fluxpoints.append(fp_new)\n\n return self.__class__.from_stack(fluxpoints, axis=axis_new)\n",
"path": "gammapy/estimators/points/core.py"
}
] | [
{
"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nfrom copy import deepcopy\nimport numpy as np\nfrom scipy import stats\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import minimize\nfrom astropy.io import fits\nfrom astropy.io.registry import IORegistryError\nfrom astropy.table import Table, vstack\nfrom astropy.time import Time\nfrom astropy.visualization import quantity_support\nimport matplotlib.pyplot as plt\nfrom gammapy.data import GTI\nfrom gammapy.maps import Map, MapAxis, Maps, RegionNDMap, TimeMapAxis\nfrom gammapy.maps.axes import UNIT_STRING_FORMAT, flat_if_equal\nfrom gammapy.modeling.models import TemplateSpectralModel\nfrom gammapy.modeling.models.spectral import scale_plot_flux\nfrom gammapy.modeling.scipy import stat_profile_ul_scipy\nfrom gammapy.utils.interpolation import interpolate_profile\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.table import table_standardise_units_copy\nfrom gammapy.utils.time import time_ref_to_dict\nfrom ..map.core import DEFAULT_UNIT, FluxMaps\n\n__all__ = [\"FluxPoints\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef squash_fluxpoints(flux_point, axis):\n \"\"\"Squash a FluxPoints object into one point.\n The log-likelihoods profiles in each bin are summed\n to compute the resultant quantities. Stat profiles\n must be present on the fluxpoints object for\n this method to work.\n \"\"\"\n\n value_scan = flux_point.stat_scan.geom.axes[\"norm\"].center\n stat_scan = np.sum(flux_point.stat_scan.data, axis=0).ravel()\n f = interp1d(value_scan, stat_scan, kind=\"quadratic\", bounds_error=False)\n f = interpolate_profile(value_scan, stat_scan)\n minimizer = minimize(\n f,\n x0=value_scan[int(len(value_scan) / 2)],\n bounds=[(value_scan[0], value_scan[-1])],\n method=\"L-BFGS-B\",\n )\n\n maps = Maps()\n geom = flux_point.geom.to_image()\n if axis.name != \"energy\":\n geom = geom.to_cube([flux_point.geom.axes[\"energy\"]])\n\n maps[\"norm\"] = Map.from_geom(geom, data=minimizer.x)\n maps[\"norm_err\"] = Map.from_geom(geom, data=np.sqrt(minimizer.hess_inv.todense()))\n maps[\"n_dof\"] = Map.from_geom(geom, data=flux_point.geom.axes[axis.name].nbin)\n\n if \"norm_ul\" in flux_point.available_quantities:\n delta_ts = flux_point.meta.get(\"n_sigma_ul\", 2) ** 2\n ul = stat_profile_ul_scipy(value_scan, stat_scan, delta_ts=delta_ts)\n maps[\"norm_ul\"] = Map.from_geom(geom, data=ul.value)\n\n maps[\"stat\"] = Map.from_geom(geom, data=f(minimizer.x))\n\n maps[\"stat_scan\"] = Map.from_geom(\n geom=geom.to_cube([MapAxis.from_nodes(value_scan, name=\"norm\")]), data=stat_scan\n )\n try:\n maps[\"stat_null\"] = Map.from_geom(geom, data=np.sum(flux_point.stat_null.data))\n maps[\"ts\"] = maps[\"stat_null\"] - maps[\"stat\"]\n except AttributeError:\n log.info(\n \"Stat null info not present on original FluxPoints object. 
TS not computed\"\n )\n\n maps[\"success\"] = Map.from_geom(geom=geom, data=minimizer.success, dtype=bool)\n\n combined_fp = FluxPoints.from_maps(\n maps=maps,\n sed_type=flux_point.sed_type_init,\n reference_model=flux_point.reference_model,\n gti=flux_point.gti,\n meta=flux_point.meta,\n )\n return combined_fp\n\n\nclass FluxPoints(FluxMaps):\n \"\"\"Flux points container.\n\n The supported formats are described here: :ref:`gadf:flux-points`.\n\n In summary, the following formats and minimum required columns are:\n\n * Format ``dnde``: columns ``e_ref`` and ``dnde``\n * Format ``e2dnde``: columns ``e_ref``, ``e2dnde``\n * Format ``flux``: columns ``e_min``, ``e_max``, ``flux``\n * Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with flux point data.\n\n Attributes\n ----------\n table : `~astropy.table.Table`\n Table with flux point data.\n\n Examples\n --------\n The `FluxPoints` object is most easily created by reading a file with\n flux points given in one of the formats documented above::\n\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> flux_points.plot() #doctest: +SKIP\n\n An instance of `FluxPoints` can also be created by passing an instance of\n `astropy.table.Table`, which contains the required columns, such as `'e_ref'`\n and `'dnde'`. The corresponding `sed_type` has to be defined in the meta data\n of the table::\n\n >>> import numpy as np\n >>> from astropy import units as u\n >>> from astropy.table import Table\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.modeling.models import PowerLawSpectralModel\n >>> table = Table()\n >>> pwl = PowerLawSpectralModel()\n >>> e_ref = np.geomspace(1, 100, 7) * u.TeV\n >>> table[\"e_ref\"] = e_ref\n >>> table[\"dnde\"] = pwl(e_ref)\n >>> table[\"dnde_err\"] = pwl.evaluate_error(e_ref)[0]\n >>> table.meta[\"SED_TYPE\"] = \"dnde\"\n >>> flux_points = FluxPoints.from_table(table)\n >>> flux_points.plot(sed_type=\"flux\") #doctest: +SKIP\n\n If you have flux points in a different data format, the format can be changed\n by renaming the table columns and adding meta data::\n\n\n >>> from astropy import units as u\n >>> from astropy.table import Table\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.utils.scripts import make_path\n\n >>> filename = make_path('$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points_ctb_37b.txt')\n >>> table = Table.read(filename ,format='ascii.csv', delimiter=' ', comment='#')\n >>> table.rename_column('Differential_Flux', 'dnde')\n >>> table['dnde'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('lower_error', 'dnde_errn')\n >>> table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('upper_error', 'dnde_errp')\n >>> table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'\n\n >>> table.rename_column('E', 'e_ref')\n >>> table['e_ref'].unit = 'TeV'\n\n >>> flux_points = FluxPoints.from_table(table, sed_type=\"dnde\")\n >>> flux_points.plot(sed_type=\"e2dnde\") #doctest: +SKIP\n\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n\n @classmethod\n def read(\n cls,\n filename,\n sed_type=None,\n format=None,\n reference_model=None,\n checksum=False,\n **kwargs,\n ):\n \"\"\"Read precomputed flux points.\n\n Parameters\n ----------\n filename : 
str\n Filename.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}\n SED type.\n format : {\"gadf-sed\", \"lightcurve\", \"profile\"}, optional\n Format string. If None, the format is extracted from the input.\n Default is None.\n reference_model : `SpectralModel`\n Reference spectral model.\n checksum : bool\n If True checks both DATASUM and CHECKSUM cards in the file headers. Default is False.\n **kwargs : dict, optional\n Keyword arguments passed to `astropy.table.Table.read`.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points.\n \"\"\"\n filename = make_path(filename)\n gti = None\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n try:\n table = Table.read(filename, **kwargs)\n except (IORegistryError, UnicodeDecodeError):\n with fits.open(filename, checksum=checksum) as hdulist:\n if \"FLUXPOINTS\" in hdulist:\n fp = hdulist[\"FLUXPOINTS\"]\n else:\n fp = hdulist[\"\"] # to handle older files\n table = Table.read(fp)\n if \"GTI\" in hdulist:\n gti = GTI.from_table_hdu(hdulist[\"GTI\"])\n\n return cls.from_table(\n table=table,\n sed_type=sed_type,\n reference_model=reference_model,\n format=format,\n gti=gti,\n )\n\n def write(\n self, filename, sed_type=None, format=None, overwrite=False, checksum=False\n ):\n \"\"\"Write flux points.\n\n Parameters\n ----------\n filename : str\n Filename.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}, optional\n SED type. Default is None.\n format : {\"gadf-sed\", \"lightcurve\", \"binned-time-series\", \"profile\"}, optional\n Format specification. The following formats are supported:\n\n * \"gadf-sed\": format for SED flux points see :ref:`gadf:flux-points`\n for details\n * \"lightcurve\": Gammapy internal format to store energy dependent\n lightcurves. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n * \"binned-time-series\": table format support by Astropy's\n `~astropy.timeseries.BinnedTimeSeries`.\n * \"profile\": Gammapy internal format to store energy dependent\n flux profiles. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n\n If None, the format will be guessed by looking at the axes that are present in the object.\n Default is None.\n\n overwrite : bool, optional\n Overwrite existing file. 
Default is False.\n checksum : bool, optional\n When True adds both DATASUM and CHECKSUM cards to the headers written to the file.\n Default is False.\n \"\"\"\n filename = make_path(filename)\n\n if sed_type is None:\n sed_type = self.sed_type_init\n table = self.to_table(sed_type=sed_type, format=format)\n\n if \".fits\" not in filename.suffixes:\n table.write(filename, overwrite=overwrite)\n return\n\n primary_hdu = fits.PrimaryHDU()\n hdu_evt = fits.BinTableHDU(table, name=\"FLUXPOINTS\")\n hdu_all = fits.HDUList([primary_hdu, hdu_evt])\n if self.gti:\n hdu_all.append(self.gti.to_table_hdu())\n\n hdu_all.writeto(filename, overwrite=overwrite, checksum=checksum)\n\n @staticmethod\n def _convert_loglike_columns(table):\n # TODO: check sign and factor 2 here\n # https://github.com/gammapy/gammapy/pull/2546#issuecomment-554274318\n # The idea below is to support the format here:\n # https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html#likelihood-columns\n # but internally to go to the uniform \"stat\"\n\n if \"loglike\" in table.colnames and \"stat\" not in table.colnames:\n table[\"stat\"] = 2 * table[\"loglike\"]\n\n if \"loglike_null\" in table.colnames and \"stat_null\" not in table.colnames:\n table[\"stat_null\"] = 2 * table[\"loglike_null\"]\n\n if \"dloglike_scan\" in table.colnames and \"stat_scan\" not in table.colnames:\n table[\"stat_scan\"] = 2 * table[\"dloglike_scan\"]\n\n return table\n\n @staticmethod\n def _table_guess_format(table):\n \"\"\"Format of the table to be transformed to FluxPoints.\"\"\"\n names = table.colnames\n if \"time_min\" in names:\n return \"lightcurve\"\n elif \"x_min\" in names:\n return \"profile\"\n else:\n return \"gadf-sed\"\n\n @classmethod\n def from_table(\n cls, table, sed_type=None, format=None, reference_model=None, gti=None\n ):\n \"\"\"Create flux points from a table. The table column names must be consistent with the\n sed_type.\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\", \"likelihood\"}, optional\n SED type. Default is None.\n format : {\"gadf-sed\", \"lightcurve\", \"profile\"}, optional\n Table format. If None, it is extracted from the table column content. Default is None.\n reference_model : `SpectralModel`, optional\n Reference spectral model. Default is None.\n gti : `GTI`, optional\n Good time intervals. 
Default is None.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points.\n \"\"\"\n table = table_standardise_units_copy(table)\n\n if format is None:\n format = cls._table_guess_format(table)\n log.info(\"Inferred format: \" + format)\n\n if sed_type is None:\n sed_type = table.meta.get(\"SED_TYPE\", None)\n\n if sed_type is None:\n sed_type = cls._guess_sed_type(table.colnames)\n\n if sed_type is None:\n raise ValueError(\"Specifying the SED type is required\")\n\n if sed_type == \"likelihood\":\n table = cls._convert_loglike_columns(table)\n if reference_model is None:\n reference_model = TemplateSpectralModel(\n energy=flat_if_equal(table[\"e_ref\"].quantity),\n values=flat_if_equal(table[\"ref_dnde\"].quantity),\n )\n\n maps = Maps()\n table.meta.setdefault(\"SED_TYPE\", sed_type)\n\n for name in cls.all_quantities(sed_type=sed_type):\n if name in table.colnames:\n maps[name] = RegionNDMap.from_table(\n table=table, colname=name, format=format\n )\n\n meta = cls._get_meta_gadf(table)\n return cls.from_maps(\n maps=maps,\n reference_model=reference_model,\n meta=meta,\n sed_type=sed_type,\n gti=gti,\n )\n\n @staticmethod\n def _get_meta_gadf(table):\n meta = {}\n meta.update(table.meta)\n conf_ul = table.meta.get(\"UL_CONF\")\n if conf_ul:\n n_sigma_ul = np.round(stats.norm.isf(0.5 * (1 - conf_ul)), 1)\n meta[\"n_sigma_ul\"] = n_sigma_ul\n meta[\"sed_type_init\"] = table.meta.get(\"SED_TYPE\")\n return meta\n\n @staticmethod\n def _format_table(table):\n \"\"\"Format table.\"\"\"\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n def _guess_format(self):\n \"\"\"Format of the FluxPoints object.\"\"\"\n names = self.geom.axes.names\n if \"time\" in names:\n return \"lightcurve\"\n elif \"projected-distance\" in names:\n return \"profile\"\n else:\n return \"gadf-sed\"\n\n def to_table(self, sed_type=None, format=None, formatted=False):\n \"\"\"Create table for a given SED type.\n\n Parameters\n ----------\n sed_type : {\"likelihood\", \"dnde\", \"e2dnde\", \"flux\", \"eflux\"}\n SED type to convert to. Default is `likelihood`.\n format : {\"gadf-sed\", \"lightcurve\", \"binned-time-series\", \"profile\"}, optional\n Format specification. The following formats are supported:\n\n * \"gadf-sed\": format for SED flux points see :ref:`gadf:flux-points`\n for details\n * \"lightcurve\": Gammapy internal format to store energy dependent\n lightcurves. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n * \"binned-time-series\": table format support by Astropy's\n `~astropy.timeseries.BinnedTimeSeries`.\n * \"profile\": Gammapy internal format to store energy dependent\n flux profiles. Basically a generalisation of the \"gadf\" format, but\n currently there is no detailed documentation available.\n\n If None, the format will be guessed by looking at the axes that are present in the object.\n Default is None.\n\n formatted : bool\n Formatted version with column formats applied. Numerical columns are\n formatted to .3f and .3e respectively. 
Default is False.\n\n Returns\n -------\n table : `~astropy.table.Table`\n Flux points table.\n\n Examples\n --------\n\n This is how to read and plot example flux points:\n\n >>> from gammapy.estimators import FluxPoints\n >>> fp = FluxPoints.read(\"$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits\")\n >>> table = fp.to_table(sed_type=\"flux\", formatted=True)\n >>> print(table[:2])\n e_ref e_min e_max flux flux_err flux_ul ts sqrt_ts is_ul\n TeV TeV TeV 1 / (s cm2) 1 / (s cm2) 1 / (s cm2)\n ----- ----- ----- ----------- ----------- ----------- -------- ------- -----\n 1.334 1.000 1.780 1.423e-11 3.135e-13 nan 2734.000 52.288 False\n 2.372 1.780 3.160 5.780e-12 1.082e-13 nan 4112.000 64.125 False\n \"\"\"\n if sed_type is None:\n sed_type = self.sed_type_init\n\n if format is None:\n format = self._guess_format()\n log.info(\"Inferred format: \" + format)\n\n if format == \"gadf-sed\":\n # TODO: what to do with GTI info?\n if not self.geom.axes.names == [\"energy\"]:\n raise ValueError(\n \"Only flux points with a single energy axis \"\n \"can be converted to 'gadf-sed'\"\n )\n\n idx = (Ellipsis, 0, 0)\n table = self.energy_axis.to_table(format=\"gadf-sed\")\n table.meta[\"SED_TYPE\"] = sed_type\n\n if not self.is_convertible_to_flux_sed_type:\n table.remove_columns([\"e_min\", \"e_max\"])\n\n if self.n_sigma_ul:\n table.meta[\"UL_CONF\"] = np.round(\n 1 - 2 * stats.norm.sf(self.n_sigma_ul), 7\n )\n\n if sed_type == \"likelihood\":\n table[\"ref_dnde\"] = self.dnde_ref[idx]\n table[\"ref_flux\"] = self.flux_ref[idx]\n table[\"ref_eflux\"] = self.eflux_ref[idx]\n\n for quantity in self.all_quantities(sed_type=sed_type):\n data = getattr(self, quantity, None)\n if data:\n table[quantity] = data.quantity[idx]\n\n if self.has_stat_profiles:\n norm_axis = self.stat_scan.geom.axes[\"norm\"]\n table[\"norm_scan\"] = norm_axis.center.reshape((1, -1))\n table[\"stat\"] = self.stat.data[idx]\n table[\"stat_scan\"] = self.stat_scan.data[idx]\n\n table[\"is_ul\"] = self.is_ul.data[idx]\n if not self.has_ul:\n table.remove_columns(\"is_ul\")\n\n elif format == \"lightcurve\":\n time_axis = self.geom.axes[\"time\"]\n\n tables = []\n for idx, (time_min, time_max) in enumerate(time_axis.iter_by_edges):\n table_flat = Table()\n table_flat[\"time_min\"] = [time_min.mjd]\n table_flat[\"time_max\"] = [time_max.mjd]\n\n fp = self.slice_by_idx(slices={\"time\": idx})\n table = fp.to_table(sed_type=sed_type, format=\"gadf-sed\")\n\n for column in table.columns:\n table_flat[column] = table[column][np.newaxis]\n\n tables.append(table_flat)\n\n table = vstack(tables)\n\n # serialize with reference time set to mjd=0.0\n ref_time = Time(0.0, format=\"mjd\", scale=time_axis.reference_time.scale)\n table.meta.update(time_ref_to_dict(ref_time, scale=ref_time.scale))\n table.meta[\"TIMEUNIT\"] = \"d\"\n\n elif format == \"binned-time-series\":\n message = (\n \"Format 'binned-time-series' support a single time axis \"\n f\"only. 
Got {self.geom.axes.names}\"\n )\n\n if not self.geom.axes.is_unidimensional:\n raise ValueError(message)\n\n axis = self.geom.axes.primary_axis\n\n if not isinstance(axis, TimeMapAxis):\n raise ValueError(message)\n\n table = Table()\n table[\"time_bin_start\"] = axis.time_min\n table[\"time_bin_size\"] = axis.time_delta\n\n for quantity in self.all_quantities(sed_type=sed_type):\n data = getattr(self, quantity, None)\n if data:\n table[quantity] = data.quantity.squeeze()\n elif format == \"profile\":\n x_axis = self.geom.axes[\"projected-distance\"]\n\n tables = []\n for idx, (x_min, x_max) in enumerate(x_axis.iter_by_edges):\n table_flat = Table()\n table_flat[\"x_min\"] = [x_min]\n table_flat[\"x_max\"] = [x_max]\n table_flat[\"x_ref\"] = [(x_max + x_min) / 2]\n\n fp = self.slice_by_idx(slices={\"projected-distance\": idx})\n table = fp.to_table(sed_type=sed_type, format=\"gadf-sed\")\n\n for column in table.columns:\n table_flat[column] = table[column][np.newaxis]\n\n tables.append(table_flat)\n\n table = vstack(tables)\n\n else:\n raise ValueError(f\"Not a supported format {format}\")\n\n if formatted:\n table = self._format_table(table=table)\n\n return table\n\n @staticmethod\n def _energy_ref_lafferty(model, energy_min, energy_max):\n \"\"\"Helper for `to_sed_type`.\n\n Compute energy_ref that the value at energy_ref corresponds\n to the mean value between energy_min and energy_max.\n \"\"\"\n flux = model.integral(energy_min, energy_max)\n dnde_mean = flux / (energy_max - energy_min)\n return model.inverse(dnde_mean)\n\n def _plot_get_flux_err(self, sed_type=None):\n \"\"\"Compute flux error for given SED type\"\"\"\n y_errn, y_errp = None, None\n\n if \"norm_err\" in self.available_quantities:\n # symmetric error\n y_errn = getattr(self, sed_type + \"_err\")\n y_errp = y_errn.copy()\n\n if \"norm_errp\" in self.available_quantities:\n y_errn = getattr(self, sed_type + \"_errn\")\n y_errp = getattr(self, sed_type + \"_errp\")\n\n return y_errn, y_errp\n\n def plot(self, ax=None, sed_type=None, energy_power=0, time_format=\"iso\", **kwargs):\n \"\"\"Plot flux points.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis object to plot on. Default is None.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\"}, optional\n SED type. Default is None.\n energy_power : float, optional\n Power of energy to multiply flux axis with. Default is 0.\n time_format : {\"iso\", \"mjd\"}\n Used time format is a time axis is present. 
Default is \"iso\".\n **kwargs : dict, optional\n Keyword arguments passed to `~RegionNDMap.plot`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object.\n \"\"\"\n if sed_type is None:\n sed_type = self.sed_type_plot_default\n\n if not self.norm.geom.is_region:\n raise ValueError(\"Plotting only supported for region based flux points\")\n\n if ax is None:\n ax = plt.gca()\n\n flux_unit = DEFAULT_UNIT[sed_type]\n\n flux = getattr(self, sed_type)\n\n # get errors and ul\n y_errn, y_errp = self._plot_get_flux_err(sed_type=sed_type)\n is_ul = self.is_ul.data\n\n if self.has_ul and y_errn and is_ul.any():\n flux_ul = getattr(self, sed_type + \"_ul\").quantity\n y_errn.data[is_ul] = np.clip(\n 0.5 * flux_ul[is_ul].to_value(y_errn.unit), 0, np.inf\n )\n y_errp.data[is_ul] = 0\n flux.data[is_ul] = flux_ul[is_ul].to_value(flux.unit)\n kwargs.setdefault(\"uplims\", is_ul)\n\n # set flux points plotting defaults\n if y_errp and y_errn:\n y_errp = np.clip(\n scale_plot_flux(y_errp, energy_power=energy_power).quantity, 0, np.inf\n )\n y_errn = np.clip(\n scale_plot_flux(y_errn, energy_power=energy_power).quantity, 0, np.inf\n )\n kwargs.setdefault(\"yerr\", (y_errn, y_errp))\n else:\n kwargs.setdefault(\"yerr\", None)\n\n flux = scale_plot_flux(flux=flux.to_unit(flux_unit), energy_power=energy_power)\n if \"time\" in flux.geom.axes_names:\n flux.geom.axes[\"time\"].time_format = time_format\n ax = flux.plot(ax=ax, **kwargs)\n ax.set_ylabel(f\"{sed_type} [{ax.yaxis.units.to_string(UNIT_STRING_FORMAT)}]\")\n ax.set_yscale(\"log\")\n return ax\n\n def plot_ts_profiles(\n self,\n ax=None,\n sed_type=None,\n add_cbar=True,\n **kwargs,\n ):\n \"\"\"Plot fit statistic SED profiles as a density plot.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis object to plot on. Default is None.\n sed_type : {\"dnde\", \"flux\", \"eflux\", \"e2dnde\"}, optional\n SED type. Default is None.\n add_cbar : bool, optional\n Whether to add a colorbar to the plot. 
Default is True.\n **kwargs : dict, optional\n Keyword arguments passed to `~matplotlib.pyplot.pcolormesh`.\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object.\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if sed_type is None:\n sed_type = self.sed_type_plot_default\n\n if not self.norm.geom.is_region:\n raise ValueError(\"Plotting only supported for region based flux points\")\n\n if not self.geom.axes.is_unidimensional:\n raise ValueError(\n \"Profile plotting is only supported for unidimensional maps\"\n )\n\n axis = self.geom.axes.primary_axis\n\n if isinstance(axis, TimeMapAxis) and not axis.is_contiguous:\n axis = axis.to_contiguous()\n\n if ax.yaxis.units is None:\n yunits = DEFAULT_UNIT[sed_type]\n else:\n yunits = ax.yaxis.units\n\n ax.yaxis.set_units(yunits)\n\n flux_ref = getattr(self, sed_type + \"_ref\").to(yunits)\n\n ts = self.ts_scan\n\n norm_min, norm_max = ts.geom.axes[\"norm\"].bounds.to_value(\"\")\n\n flux = MapAxis.from_bounds(\n norm_min * flux_ref.value.min(),\n norm_max * flux_ref.value.max(),\n nbin=500,\n interp=axis.interp,\n unit=flux_ref.unit,\n )\n\n norm = flux.center / flux_ref.reshape((-1, 1))\n\n coords = ts.geom.get_coord()\n coords[\"norm\"] = norm\n coords[axis.name] = axis.center.reshape((-1, 1))\n\n z = ts.interp_by_coord(coords, values_scale=\"stat-profile\")\n\n kwargs.setdefault(\"vmax\", 0)\n kwargs.setdefault(\"vmin\", -4)\n kwargs.setdefault(\"zorder\", 0)\n kwargs.setdefault(\"cmap\", \"Blues\")\n kwargs.setdefault(\"linewidths\", 0)\n kwargs.setdefault(\"shading\", \"auto\")\n\n # clipped values are set to NaN so that they appear white on the plot\n z[-z < kwargs[\"vmin\"]] = np.nan\n\n with quantity_support():\n caxes = ax.pcolormesh(axis.as_plot_edges, flux.edges, -z.T, **kwargs)\n\n axis.format_plot_xaxis(ax=ax)\n\n ax.set_ylabel(f\"{sed_type} [{ax.yaxis.units.to_string(UNIT_STRING_FORMAT)}]\")\n ax.set_yscale(\"log\")\n\n if add_cbar:\n label = \"Fit statistic difference\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n def recompute_ul(self, n_sigma_ul=2, **kwargs):\n \"\"\"Recompute upper limits corresponding to the given value.\n The pre-computed statistic profiles must exist for the re-computation.\n\n Parameters\n ----------\n n_sigma_ul : int\n Number of sigma to use for upper limit computation. Default is 2.\n **kwargs : dict, optional\n Keyword arguments passed to `~scipy.optimize.brentq`.\n\n Returns\n -------\n flux_points : `~gammapy.estimators.FluxPoints`\n A new FluxPoints object with modified upper limits.\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> flux_points_recomputed = flux_points.recompute_ul(n_sigma_ul=3)\n >>> print(flux_points.meta[\"n_sigma_ul\"], flux_points.flux_ul.data[0])\n 2.0 [[3.95451985e-09]]\n >>> print(flux_points_recomputed.meta[\"n_sigma_ul\"], flux_points_recomputed.flux_ul.data[0])\n 3 [[6.22245374e-09]]\n \"\"\"\n\n if not self.has_stat_profiles:\n raise ValueError(\n \"Stat profiles not present. 
Upper limit computation is not possible\"\n )\n\n delta_ts = n_sigma_ul**2\n\n flux_points = deepcopy(self)\n\n value_scan = self.stat_scan.geom.axes[\"norm\"].center\n shape_axes = self.stat_scan.geom._shape[slice(3, None)][::-1]\n for idx in np.ndindex(shape_axes):\n stat_scan = np.abs(\n self.stat_scan.data[idx].squeeze() - self.stat.data[idx].squeeze()\n )\n flux_points.norm_ul.data[idx] = stat_profile_ul_scipy(\n value_scan, stat_scan, delta_ts=delta_ts, **kwargs\n )\n flux_points.meta[\"n_sigma_ul\"] = n_sigma_ul\n return flux_points\n\n def resample_axis(self, axis_new):\n \"\"\"Rebin the flux point object along the new axis.\n The log-likelihoods profiles in each bin are summed\n to compute the resultant quantities.\n Stat profiles must be present on the fluxpoints object for\n this method to work.\n\n For now, works only for flat fluxpoints.\n\n Parameters\n ----------\n axis_new : `MapAxis` or `TimeMapAxis`\n The new axis to resample along\n\n Returns\n -------\n flux_points : `~gammapy.estimators.FluxPoints`\n A new FluxPoints object with modified axis.\n \"\"\"\n\n if not self.has_stat_profiles:\n raise ValueError(\"Stat profiles not present, rebinning is not possible\")\n\n fluxpoints = []\n for edge_min, edge_max in zip(axis_new.edges_min, axis_new.edges_max):\n if isinstance(axis_new, TimeMapAxis):\n edge_min = edge_min + axis_new.reference_time\n edge_max = edge_max + axis_new.reference_time\n fp = self.slice_by_coord({axis_new.name: slice(edge_min, edge_max)})\n fp_new = squash_fluxpoints(fp, axis_new)\n fluxpoints.append(fp_new)\n\n return self.__class__.from_stack(fluxpoints, axis=axis_new)\n",
"path": "gammapy/estimators/points/core.py"
}
] | diff --git a/gammapy/estimators/points/core.py b/gammapy/estimators/points/core.py
index 87dd6856c4..fb3aa0e7f5 100644
--- a/gammapy/estimators/points/core.py
+++ b/gammapy/estimators/points/core.py
@@ -267,7 +267,7 @@ def write(
table = self.to_table(sed_type=sed_type, format=format)
if ".fits" not in filename.suffixes:
- table.write(filename)
+ table.write(filename, overwrite=overwrite)
return
primary_hdu = fits.PrimaryHDU()
diff --git a/gammapy/estimators/points/tests/test_core.py b/gammapy/estimators/points/tests/test_core.py
index 867e35e17f..dc2e023f06 100644
--- a/gammapy/estimators/points/tests/test_core.py
+++ b/gammapy/estimators/points/tests/test_core.py
@@ -211,12 +211,14 @@ def test_write_fits(self, tmp_path, flux_points):
assert str(flux_points) == str(actual)
def test_write_ecsv(self, tmp_path, flux_points):
+ filename = tmp_path / "flux_points.ecsv"
+ filename.touch()
flux_points.write(
- tmp_path / "flux_points.ecsv",
+ filename,
sed_type=flux_points.sed_type_init,
overwrite=True,
)
- actual = FluxPoints.read(tmp_path / "flux_points.ecsv")
+ actual = FluxPoints.read(filename)
actual._data.pop("is_ul", None)
flux_points._data.pop("is_ul", None)
assert str(flux_points) == str(actual)
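The patch in this row boils down to forwarding the `overwrite` flag that `FluxPoints.write` already accepts on to `astropy.table.Table.write` for non-FITS output, plus a test that writes over a pre-existing `.ecsv` file. A minimal sketch of the forwarding pattern, assuming nothing beyond what the diff shows (the helper name below is made up):

```
from astropy.table import Table

def write_table(table: Table, filename, overwrite=False):
    # Table.write refuses to clobber an existing non-FITS file unless
    # overwrite=True reaches it, so the caller's flag has to be passed
    # through rather than silently dropped.
    table.write(filename, overwrite=overwrite)
```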
|
bookwyrm-social__bookwyrm-2387 | Internal Server Errors (e.g. on delete of user)
**Describe the bug**
Internal server error for some actions. I have set up a dockerless installation and am able to access the application and the admin pages. However, some actions create errors. For example:
**To Reproduce**
Steps to reproduce the behavior:
1. Click "Delete user" after providing the admin password. The browser shows an internal server error. The error logged by the application is:
```
Internal Server Error: /settings/reports/2/delete
Traceback (most recent call last):
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 47, in inner
    response = get_response(request)
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 204, in _get_response
    response = response.render()
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 105, in render
    self.content = self.rendered_content
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 81, in rendered_content
    template = self.resolve_template(self.template_name)
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/response.py", line 65, in resolve_template
    return get_template(template, using=self.using)
  File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/template/loader.py", line 19, in get_template
    raise TemplateDoesNotExist(template_name, chain=chain)
django.template.exceptions.TemplateDoesNotExist: user_admin/user.html
```
| [
{
"content": "\"\"\" moderation via flagged posts and users \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportsAdmin(View):\n \"\"\"list of reports\"\"\"\n\n def get(self, request):\n \"\"\"view current reports\"\"\"\n filters = {}\n\n resolved = request.GET.get(\"resolved\") == \"true\"\n server = request.GET.get(\"server\")\n if server:\n filters[\"user__federated_server__server_name\"] = server\n username = request.GET.get(\"username\")\n if username:\n filters[\"user__username__icontains\"] = username\n filters[\"resolved\"] = resolved\n\n reports = models.Report.objects.filter(**filters)\n paginated = Paginator(reports, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"resolved\": resolved,\n \"server\": server,\n \"reports\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n return TemplateResponse(request, \"settings/reports/reports.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportAdmin(View):\n \"\"\"view a specific report\"\"\"\n\n def get(self, request, report_id):\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n \"group_form\": forms.UserGroupForm(),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n\n def post(self, request, report_id):\n \"\"\"comment on a report\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n models.ReportComment.objects.create(\n user=request.user,\n report=report,\n note=request.POST.get(\"note\"),\n )\n return redirect(\"settings-report\", report.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef suspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = False\n user.deactivation_reason = \"moderator_suspension\"\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef unsuspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = True\n user.deactivation_reason = None\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef moderator_delete_user(request, user_id):\n 
\"\"\"permanently delete a user\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n\n # we can't delete users on other instances\n if not user.local:\n raise PermissionDenied()\n\n form = forms.DeleteUserForm(request.POST, instance=user)\n\n moderator = models.User.objects.get(id=request.user.id)\n # check the moderator's password\n if form.is_valid() and moderator.check_password(form.cleaned_data[\"password\"]):\n user.deactivation_reason = \"moderator_deletion\"\n user.delete()\n return redirect(\"settings-user\", user.id)\n\n form.errors[\"password\"] = [\"Invalid password\"]\n\n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_post\")\ndef resolve_report(_, report_id):\n \"\"\"mark a report as (un)resolved\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n report.resolved = not report.resolved\n report.save()\n if not report.resolved:\n return redirect(\"settings-report\", report.id)\n return redirect(\"settings-reports\")\n",
"path": "bookwyrm/views/admin/reports.py"
}
] | [
{
"content": "\"\"\" moderation via flagged posts and users \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportsAdmin(View):\n \"\"\"list of reports\"\"\"\n\n def get(self, request):\n \"\"\"view current reports\"\"\"\n filters = {}\n\n resolved = request.GET.get(\"resolved\") == \"true\"\n server = request.GET.get(\"server\")\n if server:\n filters[\"user__federated_server__server_name\"] = server\n username = request.GET.get(\"username\")\n if username:\n filters[\"user__username__icontains\"] = username\n filters[\"resolved\"] = resolved\n\n reports = models.Report.objects.filter(**filters)\n paginated = Paginator(reports, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"resolved\": resolved,\n \"server\": server,\n \"reports\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n return TemplateResponse(request, \"settings/reports/reports.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\n@method_decorator(\n permission_required(\"bookwyrm.moderate_post\", raise_exception=True),\n name=\"dispatch\",\n)\nclass ReportAdmin(View):\n \"\"\"view a specific report\"\"\"\n\n def get(self, request, report_id):\n \"\"\"load a report\"\"\"\n data = {\n \"report\": get_object_or_404(models.Report, id=report_id),\n \"group_form\": forms.UserGroupForm(),\n }\n return TemplateResponse(request, \"settings/reports/report.html\", data)\n\n def post(self, request, report_id):\n \"\"\"comment on a report\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n models.ReportComment.objects.create(\n user=request.user,\n report=report,\n note=request.POST.get(\"note\"),\n )\n return redirect(\"settings-report\", report.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef suspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = False\n user.deactivation_reason = \"moderator_suspension\"\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef unsuspend_user(_, user_id):\n \"\"\"mark an account as inactive\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n user.is_active = True\n user.deactivation_reason = None\n # this isn't a full deletion, so we don't want to tell the world\n user.save(broadcast=False)\n return redirect(\"settings-user\", user.id)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_user\")\ndef moderator_delete_user(request, user_id):\n 
\"\"\"permanently delete a user\"\"\"\n user = get_object_or_404(models.User, id=user_id)\n\n # we can't delete users on other instances\n if not user.local:\n raise PermissionDenied()\n\n form = forms.DeleteUserForm(request.POST, instance=user)\n\n moderator = models.User.objects.get(id=request.user.id)\n # check the moderator's password\n if form.is_valid() and moderator.check_password(form.cleaned_data[\"password\"]):\n user.deactivation_reason = \"moderator_deletion\"\n user.delete()\n return redirect(\"settings-user\", user.id)\n\n form.errors[\"password\"] = [\"Invalid password\"]\n\n data = {\"user\": user, \"group_form\": forms.UserGroupForm(), \"form\": form}\n return TemplateResponse(request, \"settings/users/user.html\", data)\n\n\n@login_required\n@permission_required(\"bookwyrm.moderate_post\")\ndef resolve_report(_, report_id):\n \"\"\"mark a report as (un)resolved\"\"\"\n report = get_object_or_404(models.Report, id=report_id)\n report.resolved = not report.resolved\n report.save()\n if not report.resolved:\n return redirect(\"settings-report\", report.id)\n return redirect(\"settings-reports\")\n",
"path": "bookwyrm/views/admin/reports.py"
}
] | diff --git a/bookwyrm/tests/views/admin/test_reports.py b/bookwyrm/tests/views/admin/test_reports.py
index e93b343413..6b31175d8e 100644
--- a/bookwyrm/tests/views/admin/test_reports.py
+++ b/bookwyrm/tests/views/admin/test_reports.py
@@ -15,6 +15,7 @@
class ReportViews(TestCase):
"""every response to a get request, html or json"""
+ # pylint: disable=invalid-name
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
@@ -147,3 +148,16 @@ def test_delete_user(self, *_):
self.rat.refresh_from_db()
self.assertFalse(self.rat.is_active)
self.assertEqual(self.rat.deactivation_reason, "moderator_deletion")
+
+ def test_delete_user_error(self, *_):
+ """toggle whether a user is able to log in"""
+ self.assertTrue(self.rat.is_active)
+ request = self.factory.post("", {"password": "wrong password"})
+ request.user = self.local_user
+
+ result = views.moderator_delete_user(request, self.rat.id)
+ self.assertIsInstance(result, TemplateResponse)
+ validate_html(result.render())
+
+ self.rat.refresh_from_db()
+ self.assertTrue(self.rat.is_active)
diff --git a/bookwyrm/views/admin/reports.py b/bookwyrm/views/admin/reports.py
index a0b222ebe4..cf91299d97 100644
--- a/bookwyrm/views/admin/reports.py
+++ b/bookwyrm/views/admin/reports.py
@@ -128,7 +128,7 @@ def moderator_delete_user(request, user_id):
form.errors["password"] = ["Invalid password"]
data = {"user": user, "group_form": forms.UserGroupForm(), "form": form}
- return TemplateResponse(request, "user_admin/user.html", data)
+ return TemplateResponse(request, "settings/users/user.html", data)
@login_required
|
openai__gym-1708 | Bug in PixelObservationWrapper
Error log
```
env = PixelObservationWrapper(env, pixels_only=True)
  File "/home/tsan/Desktop/gym/gym/wrappers/pixel_observation.py", line 89, in __init__
    pixels = self.env.render(**render_kwargs)
  File "/home/tsan/Desktop/gym/gym/core.py", line 233, in render
    return self.env.render(mode, **kwargs)
TypeError: render() got an unexpected keyword argument 'pixels'
```
Can be reproduced by running
```
import gym
from gym.wrappers.pixel_observation import PixelObservationWrapper # pylint: disable=E0401
env = gym.make('Acrobot-v1')
env.reset()
env = PixelObservationWrapper(env, pixels_only=True)
env.step(0)
```
| [
{
"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs)\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif 
self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n",
"path": "gym/wrappers/pixel_observation.py"
}
] | [
{
"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n 
elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n",
"path": "gym/wrappers/pixel_observation.py"
}
] | diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py
index 1c282771dbb..7641cbf84f9 100644
--- a/gym/wrappers/pixel_observation.py
+++ b/gym/wrappers/pixel_observation.py
@@ -86,7 +86,7 @@ def __init__(self,
pixels_spaces = {}
for pixel_key in pixel_keys:
- pixels = self.env.render(**render_kwargs)
+ pixels = self.env.render(**render_kwargs[pixel_key])
if np.issubdtype(pixels.dtype, np.integer):
low, high = (0, 255)
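The change in this row is just indexing `render_kwargs` by `pixel_key` before unpacking it: the outer dict is keyed by pixel key, so unpacking it directly hands `env.render` a keyword argument literally named `pixels`, which is the `TypeError` in the report. A toy stand-in (not Gym code) showing the difference:

```
def render(mode="rgb_array", **kwargs):
    # Stand-in for env.render(); it only understands rendering options.
    return "rendered with mode=" + mode

render_kwargs = {"pixels": {"mode": "rgb_array"}}

# Buggy: unpacks the outer mapping, so render() receives pixels={...}
# render(**render_kwargs)   # TypeError: unexpected keyword argument 'pixels'

# Fixed: pick out the per-key options first, then unpack those.
for pixel_key in ("pixels",):
    print(render(**render_kwargs[pixel_key]))   # render(mode="rgb_array")
```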
|
Parsl__parsl-613 | TorqueProvider fails on NSCC
The following patch is required in order to run the `TorqueProvider` on NSCC:
```
[nscc04] ~/libsubmit >git diff
diff --git a/libsubmit/providers/torque/template.py b/libsubmit/providers/torque/template.py
index a00ce7c..056c648 100644
--- a/libsubmit/providers/torque/template.py
+++ b/libsubmit/providers/torque/template.py
@@ -8,7 +8,6 @@ template_string = '''#!/bin/bash
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
```
Otherwise, the job fails with `qsub: cannot send environment with the job`. Could we just merge the patch, or should we make this configurable somehow?
| [
{
"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -k eo\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n#PBS -v WORKER_LOGGING_LEVEL\n${overrides}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n",
"path": "parsl/providers/torque/template.py"
}
] | [
{
"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -k eo\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${overrides}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n",
"path": "parsl/providers/torque/template.py"
}
] | diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py
index a00ce7c096..056c6485d2 100644
--- a/parsl/providers/torque/template.py
+++ b/parsl/providers/torque/template.py
@@ -8,7 +8,6 @@
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
|
googleapis__python-bigquery-66 | Bigquery: Model reference repr seems wrong for model_id
In `ModelReference`'s repr, `project_id` appears to be used where `model_id` should be used:
https://github.com/googleapis/python-bigquery/blob/be5c8b1ede9a2d762fd5574c32587d125eca4713/google/cloud/bigquery/model.py#L432-L435
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define resources for the BigQuery ML Models API.\"\"\"\n\nimport copy\n\nfrom google.protobuf import json_format\nimport six\n\nimport google.cloud._helpers\nfrom google.api_core import datetime_helpers\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery_v2 import types\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\n\n\nclass Model(object):\n \"\"\"Model represents a machine learning model resource.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/models\n\n Args:\n model_ref (Union[google.cloud.bigquery.model.ModelReference, str]):\n A pointer to a model. If ``model_ref`` is a string, it must\n included a project ID, dataset ID, and model ID, each separated\n by ``.``.\n \"\"\"\n\n _PROPERTY_TO_API_FIELD = {\n \"expires\": \"expirationTime\",\n \"friendly_name\": \"friendlyName\",\n # Even though it's not necessary for field mapping to map when the\n # property name equals the resource name, we add these here so that we\n # have an exhaustive list of all mutable properties.\n \"labels\": \"labels\",\n \"description\": \"description\",\n \"encryption_configuration\": \"encryptionConfiguration\",\n }\n\n def __init__(self, model_ref):\n # Use _proto on read-only properties to use it's built-in type\n # conversion.\n self._proto = types.Model()\n\n # Use _properties on read-write properties to match the REST API\n # semantics. The BigQuery API makes a distinction between an unset\n # value, a null value, and a default value (0 or \"\"), but the protocol\n # buffer classes do not.\n self._properties = {}\n\n if isinstance(model_ref, six.string_types):\n model_ref = ModelReference.from_string(model_ref)\n\n if model_ref:\n self._proto.model_reference.CopyFrom(model_ref._proto)\n\n @property\n def reference(self):\n \"\"\"A :class:`~google.cloud.bigquery.model.ModelReference` pointing to\n this model.\n\n Read-only.\n\n Returns:\n google.cloud.bigquery.model.ModelReference: pointer to this model.\n \"\"\"\n ref = ModelReference()\n ref._proto = self._proto.model_reference\n return ref\n\n @property\n def project(self):\n \"\"\"str: Project bound to the model\"\"\"\n return self.reference.project\n\n @property\n def dataset_id(self):\n \"\"\"str: ID of dataset containing the model.\"\"\"\n return self.reference.dataset_id\n\n @property\n def model_id(self):\n \"\"\"str: The model ID.\"\"\"\n return self.reference.model_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the model's APIs.\"\"\"\n return self.reference.path\n\n @property\n def location(self):\n \"\"\"str: The geographic location where the model resides. 
This value\n is inherited from the dataset.\n\n Read-only.\n \"\"\"\n return self._proto.location\n\n @property\n def etag(self):\n \"\"\"str: ETag for the model resource (:data:`None` until\n set from the server).\n\n Read-only.\n \"\"\"\n return self._proto.etag\n\n @property\n def created(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the model was\n created (:data:`None` until set from the server).\n\n Read-only.\n \"\"\"\n value = self._proto.creation_time\n if value is not None and value != 0:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @property\n def modified(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the model was last\n modified (:data:`None` until set from the server).\n\n Read-only.\n \"\"\"\n value = self._proto.last_modified_time\n if value is not None and value != 0:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @property\n def model_type(self):\n \"\"\"google.cloud.bigquery_v2.gapic.enums.Model.ModelType: Type of the\n model resource.\n\n Read-only.\n\n The value is one of elements of the\n :class:`~google.cloud.bigquery_v2.gapic.enums.Model.ModelType`\n enumeration.\n \"\"\"\n return self._proto.model_type\n\n @property\n def training_runs(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun]: Information\n for all training runs in increasing order of start time.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.Model.TrainingRun`.\n \"\"\"\n return self._proto.training_runs\n\n @property\n def feature_columns(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.StandardSqlField]: Input\n feature columns that were used to train this model.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.StandardSqlField`.\n \"\"\"\n return self._proto.feature_columns\n\n @property\n def label_columns(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.StandardSqlField]: Label\n columns that were used to train this model. The output of the model\n will have a ``predicted_`` prefix to these columns.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.StandardSqlField`.\n \"\"\"\n return self._proto.label_columns\n\n @property\n def expires(self):\n \"\"\"Union[datetime.datetime, None]: The datetime when this model\n expires. If not present, the model will persist indefinitely. 
Expired\n models will be deleted and their storage reclaimed.\n \"\"\"\n value = self._properties.get(\"expirationTime\")\n if value is not None:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @expires.setter\n def expires(self, value):\n if value is not None:\n value = str(google.cloud._helpers._millis_from_datetime(value))\n self._properties[\"expirationTime\"] = value\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description of the model (defaults to\n :data:`None`).\n \"\"\"\n return self._properties.get(\"description\")\n\n @description.setter\n def description(self, value):\n self._properties[\"description\"] = value\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the table (defaults to :data:`None`).\n\n Raises:\n ValueError: For invalid value types.\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @friendly_name.setter\n def friendly_name(self, value):\n self._properties[\"friendlyName\"] = value\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the table.\n\n This method always returns a dict. To change a model's labels,\n modify the dict, then call ``Client.update_model``. To delete a\n label, set its value to :data:`None` before updating.\n \"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @labels.setter\n def labels(self, value):\n if value is None:\n value = {}\n self._properties[\"labels\"] = value\n\n @property\n def encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the model.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See `protecting data with Cloud KMS keys\n <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_\n in the BigQuery documentation.\n \"\"\"\n prop = self._properties.get(\"encryptionConfiguration\")\n if prop:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @encryption_configuration.setter\n def encryption_configuration(self, value):\n api_repr = value\n if value:\n api_repr = value.to_api_repr()\n self._properties[\"encryptionConfiguration\"] = api_repr\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a model resource given its API representation\n\n Args:\n resource (Dict[str, object]):\n Model resource representation from the API\n\n Returns:\n google.cloud.bigquery.model.Model: Model parsed from ``resource``.\n \"\"\"\n this = cls(None)\n # Keep a reference to the resource as a workaround to find unknown\n # field values.\n this._properties = resource\n\n # Convert from millis-from-epoch to timestamp well-known type.\n # TODO: Remove this hack once CL 238585470 hits prod.\n resource = copy.deepcopy(resource)\n for training_run in resource.get(\"trainingRuns\", ()):\n start_time = training_run.get(\"startTime\")\n if not start_time or \"-\" in start_time: # Already right format?\n continue\n start_time = datetime_helpers.from_microseconds(1e3 * float(start_time))\n training_run[\"startTime\"] = datetime_helpers.to_rfc3339(start_time)\n\n this._proto = json_format.ParseDict(\n resource, types.Model(), ignore_unknown_fields=True\n )\n return this\n\n def _build_resource(self, filter_fields):\n \"\"\"Generate a resource for ``update``.\"\"\"\n return _helpers._build_resource_from_properties(self, filter_fields)\n\n def __repr__(self):\n return 
\"Model(reference={})\".format(repr(self.reference))\n\n\nclass ModelReference(object):\n \"\"\"ModelReferences are pointers to models.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/models#modelreference\n \"\"\"\n\n def __init__(self):\n self._proto = types.ModelReference()\n self._properties = {}\n\n @property\n def project(self):\n \"\"\"str: Project bound to the model\"\"\"\n return self._proto.project_id\n\n @property\n def dataset_id(self):\n \"\"\"str: ID of dataset containing the model.\"\"\"\n return self._proto.dataset_id\n\n @property\n def model_id(self):\n \"\"\"str: The model ID.\"\"\"\n return self._proto.model_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the model's APIs.\"\"\"\n return \"/projects/%s/datasets/%s/models/%s\" % (\n self._proto.project_id,\n self._proto.dataset_id,\n self._proto.model_id,\n )\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a model reference given its API representation\n\n Args:\n resource (Dict[str, object]):\n Model reference representation returned from the API\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n Model reference parsed from ``resource``.\n \"\"\"\n ref = cls()\n # Keep a reference to the resource as a workaround to find unknown\n # field values.\n ref._properties = resource\n ref._proto = json_format.ParseDict(\n resource, types.ModelReference(), ignore_unknown_fields=True\n )\n return ref\n\n @classmethod\n def from_string(cls, model_id, default_project=None):\n \"\"\"Construct a model reference from model ID string.\n\n Args:\n model_id (str):\n A model ID in standard SQL format. If ``default_project``\n is not specified, this must included a project ID, dataset\n ID, and model ID, each separated by ``.``.\n default_project (str):\n Optional. The project ID to use when ``model_id`` does not\n include a project ID.\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n Model reference parsed from ``model_id``.\n\n Raises:\n ValueError:\n If ``model_id`` is not a fully-qualified table ID in\n standard SQL format.\n \"\"\"\n proj, dset, model = _helpers._parse_3_part_id(\n model_id, default_project=default_project, property_name=\"model_id\"\n )\n return cls.from_api_repr(\n {\"projectId\": proj, \"datasetId\": dset, \"modelId\": model}\n )\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this model reference.\n\n Returns:\n Dict[str, object]: Model reference represented as an API resource\n \"\"\"\n return json_format.MessageToDict(self._proto)\n\n def _key(self):\n \"\"\"Unique key for this model.\n\n This is used for hashing a ModelReference.\n \"\"\"\n return self.project, self.dataset_id, self.model_id\n\n def __eq__(self, other):\n if not isinstance(other, ModelReference):\n return NotImplemented\n return self._proto == other._proto\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return \"ModelReference(project='{}', dataset_id='{}', project_id='{}')\".format(\n self.project, self.dataset_id, self.model_id\n )\n",
"path": "google/cloud/bigquery/model.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define resources for the BigQuery ML Models API.\"\"\"\n\nimport copy\n\nfrom google.protobuf import json_format\nimport six\n\nimport google.cloud._helpers\nfrom google.api_core import datetime_helpers\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery_v2 import types\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\n\n\nclass Model(object):\n \"\"\"Model represents a machine learning model resource.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/models\n\n Args:\n model_ref (Union[google.cloud.bigquery.model.ModelReference, str]):\n A pointer to a model. If ``model_ref`` is a string, it must\n included a project ID, dataset ID, and model ID, each separated\n by ``.``.\n \"\"\"\n\n _PROPERTY_TO_API_FIELD = {\n \"expires\": \"expirationTime\",\n \"friendly_name\": \"friendlyName\",\n # Even though it's not necessary for field mapping to map when the\n # property name equals the resource name, we add these here so that we\n # have an exhaustive list of all mutable properties.\n \"labels\": \"labels\",\n \"description\": \"description\",\n \"encryption_configuration\": \"encryptionConfiguration\",\n }\n\n def __init__(self, model_ref):\n # Use _proto on read-only properties to use it's built-in type\n # conversion.\n self._proto = types.Model()\n\n # Use _properties on read-write properties to match the REST API\n # semantics. The BigQuery API makes a distinction between an unset\n # value, a null value, and a default value (0 or \"\"), but the protocol\n # buffer classes do not.\n self._properties = {}\n\n if isinstance(model_ref, six.string_types):\n model_ref = ModelReference.from_string(model_ref)\n\n if model_ref:\n self._proto.model_reference.CopyFrom(model_ref._proto)\n\n @property\n def reference(self):\n \"\"\"A :class:`~google.cloud.bigquery.model.ModelReference` pointing to\n this model.\n\n Read-only.\n\n Returns:\n google.cloud.bigquery.model.ModelReference: pointer to this model.\n \"\"\"\n ref = ModelReference()\n ref._proto = self._proto.model_reference\n return ref\n\n @property\n def project(self):\n \"\"\"str: Project bound to the model\"\"\"\n return self.reference.project\n\n @property\n def dataset_id(self):\n \"\"\"str: ID of dataset containing the model.\"\"\"\n return self.reference.dataset_id\n\n @property\n def model_id(self):\n \"\"\"str: The model ID.\"\"\"\n return self.reference.model_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the model's APIs.\"\"\"\n return self.reference.path\n\n @property\n def location(self):\n \"\"\"str: The geographic location where the model resides. 
This value\n is inherited from the dataset.\n\n Read-only.\n \"\"\"\n return self._proto.location\n\n @property\n def etag(self):\n \"\"\"str: ETag for the model resource (:data:`None` until\n set from the server).\n\n Read-only.\n \"\"\"\n return self._proto.etag\n\n @property\n def created(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the model was\n created (:data:`None` until set from the server).\n\n Read-only.\n \"\"\"\n value = self._proto.creation_time\n if value is not None and value != 0:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @property\n def modified(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the model was last\n modified (:data:`None` until set from the server).\n\n Read-only.\n \"\"\"\n value = self._proto.last_modified_time\n if value is not None and value != 0:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @property\n def model_type(self):\n \"\"\"google.cloud.bigquery_v2.gapic.enums.Model.ModelType: Type of the\n model resource.\n\n Read-only.\n\n The value is one of elements of the\n :class:`~google.cloud.bigquery_v2.gapic.enums.Model.ModelType`\n enumeration.\n \"\"\"\n return self._proto.model_type\n\n @property\n def training_runs(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.Model.TrainingRun]: Information\n for all training runs in increasing order of start time.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.Model.TrainingRun`.\n \"\"\"\n return self._proto.training_runs\n\n @property\n def feature_columns(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.StandardSqlField]: Input\n feature columns that were used to train this model.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.StandardSqlField`.\n \"\"\"\n return self._proto.feature_columns\n\n @property\n def label_columns(self):\n \"\"\"Sequence[google.cloud.bigquery_v2.types.StandardSqlField]: Label\n columns that were used to train this model. The output of the model\n will have a ``predicted_`` prefix to these columns.\n\n Read-only.\n\n An iterable of :class:`~google.cloud.bigquery_v2.types.StandardSqlField`.\n \"\"\"\n return self._proto.label_columns\n\n @property\n def expires(self):\n \"\"\"Union[datetime.datetime, None]: The datetime when this model\n expires. If not present, the model will persist indefinitely. 
Expired\n models will be deleted and their storage reclaimed.\n \"\"\"\n value = self._properties.get(\"expirationTime\")\n if value is not None:\n # value will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(value)\n )\n\n @expires.setter\n def expires(self, value):\n if value is not None:\n value = str(google.cloud._helpers._millis_from_datetime(value))\n self._properties[\"expirationTime\"] = value\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description of the model (defaults to\n :data:`None`).\n \"\"\"\n return self._properties.get(\"description\")\n\n @description.setter\n def description(self, value):\n self._properties[\"description\"] = value\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the table (defaults to :data:`None`).\n\n Raises:\n ValueError: For invalid value types.\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @friendly_name.setter\n def friendly_name(self, value):\n self._properties[\"friendlyName\"] = value\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the table.\n\n This method always returns a dict. To change a model's labels,\n modify the dict, then call ``Client.update_model``. To delete a\n label, set its value to :data:`None` before updating.\n \"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @labels.setter\n def labels(self, value):\n if value is None:\n value = {}\n self._properties[\"labels\"] = value\n\n @property\n def encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for the model.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See `protecting data with Cloud KMS keys\n <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_\n in the BigQuery documentation.\n \"\"\"\n prop = self._properties.get(\"encryptionConfiguration\")\n if prop:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @encryption_configuration.setter\n def encryption_configuration(self, value):\n api_repr = value\n if value:\n api_repr = value.to_api_repr()\n self._properties[\"encryptionConfiguration\"] = api_repr\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a model resource given its API representation\n\n Args:\n resource (Dict[str, object]):\n Model resource representation from the API\n\n Returns:\n google.cloud.bigquery.model.Model: Model parsed from ``resource``.\n \"\"\"\n this = cls(None)\n # Keep a reference to the resource as a workaround to find unknown\n # field values.\n this._properties = resource\n\n # Convert from millis-from-epoch to timestamp well-known type.\n # TODO: Remove this hack once CL 238585470 hits prod.\n resource = copy.deepcopy(resource)\n for training_run in resource.get(\"trainingRuns\", ()):\n start_time = training_run.get(\"startTime\")\n if not start_time or \"-\" in start_time: # Already right format?\n continue\n start_time = datetime_helpers.from_microseconds(1e3 * float(start_time))\n training_run[\"startTime\"] = datetime_helpers.to_rfc3339(start_time)\n\n this._proto = json_format.ParseDict(\n resource, types.Model(), ignore_unknown_fields=True\n )\n return this\n\n def _build_resource(self, filter_fields):\n \"\"\"Generate a resource for ``update``.\"\"\"\n return _helpers._build_resource_from_properties(self, filter_fields)\n\n def __repr__(self):\n return 
\"Model(reference={})\".format(repr(self.reference))\n\n\nclass ModelReference(object):\n \"\"\"ModelReferences are pointers to models.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/models#modelreference\n \"\"\"\n\n def __init__(self):\n self._proto = types.ModelReference()\n self._properties = {}\n\n @property\n def project(self):\n \"\"\"str: Project bound to the model\"\"\"\n return self._proto.project_id\n\n @property\n def dataset_id(self):\n \"\"\"str: ID of dataset containing the model.\"\"\"\n return self._proto.dataset_id\n\n @property\n def model_id(self):\n \"\"\"str: The model ID.\"\"\"\n return self._proto.model_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the model's APIs.\"\"\"\n return \"/projects/%s/datasets/%s/models/%s\" % (\n self._proto.project_id,\n self._proto.dataset_id,\n self._proto.model_id,\n )\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a model reference given its API representation\n\n Args:\n resource (Dict[str, object]):\n Model reference representation returned from the API\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n Model reference parsed from ``resource``.\n \"\"\"\n ref = cls()\n # Keep a reference to the resource as a workaround to find unknown\n # field values.\n ref._properties = resource\n ref._proto = json_format.ParseDict(\n resource, types.ModelReference(), ignore_unknown_fields=True\n )\n return ref\n\n @classmethod\n def from_string(cls, model_id, default_project=None):\n \"\"\"Construct a model reference from model ID string.\n\n Args:\n model_id (str):\n A model ID in standard SQL format. If ``default_project``\n is not specified, this must included a project ID, dataset\n ID, and model ID, each separated by ``.``.\n default_project (str):\n Optional. The project ID to use when ``model_id`` does not\n include a project ID.\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n Model reference parsed from ``model_id``.\n\n Raises:\n ValueError:\n If ``model_id`` is not a fully-qualified table ID in\n standard SQL format.\n \"\"\"\n proj, dset, model = _helpers._parse_3_part_id(\n model_id, default_project=default_project, property_name=\"model_id\"\n )\n return cls.from_api_repr(\n {\"projectId\": proj, \"datasetId\": dset, \"modelId\": model}\n )\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this model reference.\n\n Returns:\n Dict[str, object]: Model reference represented as an API resource\n \"\"\"\n return json_format.MessageToDict(self._proto)\n\n def _key(self):\n \"\"\"Unique key for this model.\n\n This is used for hashing a ModelReference.\n \"\"\"\n return self.project, self.dataset_id, self.model_id\n\n def __eq__(self, other):\n if not isinstance(other, ModelReference):\n return NotImplemented\n return self._proto == other._proto\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return \"ModelReference(project_id='{}', dataset_id='{}', model_id='{}')\".format(\n self.project, self.dataset_id, self.model_id\n )\n",
"path": "google/cloud/bigquery/model.py"
}
] | diff --git a/google/cloud/bigquery/model.py b/google/cloud/bigquery/model.py
index d39ec5f2f..a2510e86c 100644
--- a/google/cloud/bigquery/model.py
+++ b/google/cloud/bigquery/model.py
@@ -430,6 +430,6 @@ def __hash__(self):
return hash(self._key())
def __repr__(self):
- return "ModelReference(project='{}', dataset_id='{}', project_id='{}')".format(
+ return "ModelReference(project_id='{}', dataset_id='{}', model_id='{}')".format(
self.project, self.dataset_id, self.model_id
)
diff --git a/tests/unit/model/test_model.py b/tests/unit/model/test_model.py
index bbb93ef9e..90fc09e66 100644
--- a/tests/unit/model/test_model.py
+++ b/tests/unit/model/test_model.py
@@ -316,5 +316,5 @@ def test_repr(target_class):
got = repr(model)
assert got == (
"Model(reference=ModelReference("
- "project='my-proj', dataset_id='my_dset', project_id='my_model'))"
+ "project_id='my-proj', dataset_id='my_dset', model_id='my_model'))"
)
diff --git a/tests/unit/model/test_model_reference.py b/tests/unit/model/test_model_reference.py
index ff1d1df7d..39dabb55d 100644
--- a/tests/unit/model/test_model_reference.py
+++ b/tests/unit/model/test_model_reference.py
@@ -136,5 +136,5 @@ def test_repr(target_class):
got = repr(model)
assert (
got
- == "ModelReference(project='my-proj', dataset_id='my_dset', project_id='my_model')"
+ == "ModelReference(project_id='my-proj', dataset_id='my_dset', model_id='my_model')"
)
|
paperless-ngx__paperless-ngx-3161 | [BUG] mail consumption fails if action is delete and no criteria are given
### Description
A mail consumption rule that contains no criteria and uses the action "delete" fails with these messages:
```
today at 22:30:00[2023-04-24 22:30:00,569] [ERROR] [paperless_mail] Rule XXX.YYY: Error while processing rule: AND expects params
today at 22:30:00Traceback (most recent call last):
today at 22:30:00 File "/usr/src/paperless/src/paperless_mail/mail.py", line 290, in handle_mail_account
today at 22:30:00 total_processed_files += self.handle_mail_rule(
today at 22:30:00 File "/usr/src/paperless/src/paperless_mail/mail.py", line 357, in handle_mail_rule
today at 22:30:00 criterias_imap = AND(**criterias)
today at 22:30:00 File "/usr/local/lib/python3.9/site-packages/imap_tools/query.py", line 97, in __init__
today at 22:30:00 raise ValueError('{} expects params'.format(self.__class__.__name__))
today at 22:30:00ValueError: AND expects params
```
When the action is switched to "Tag", the run succeeds even with empty criteria. Likewise, the "Delete" action produces no error as soon as any criterion is set.
See this screenshot (name and account information removed):

### Possible Cause
Looking at the code of [`make_criterias`](https://github.com/paperless-ngx/paperless-ngx/blob/dev/src/paperless_mail/mail.py#L364) in `mail.py` and the missing `get_criteria` override in [`DeleteMailAction`](https://github.com/paperless-ngx/paperless-ngx/blob/dev/src/paperless_mail/mail.py#L100), it appears that in this situation no criteria at all are passed to [`LogicOperator`](https://github.com/ikvk/imap_tools/blob/master/imap_tools/query.py), resulting in the [`ValueError`](https://github.com/ikvk/imap_tools/blob/master/imap_tools/query.py#L97) shown above.
This might also affect [`MoveMailAction`](https://github.com/paperless-ngx/paperless-ngx/blob/dev/src/paperless_mail/mail.py#L121), since `get_criteria` is missing there as well.
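A rough, illustrative sketch of the failure and one possible guard (not the actual patch; the variable names mirror `make_criterias`, and the `"ALL"` fallback is an assumption about how an empty query could be expressed):

```python
from imap_tools import AND

rule_query = {}  # DeleteMailAction does not override get_criteria(), so {} comes back
criterias = {}   # no filters configured on the rule and maximum_age == 0

try:
    AND(**rule_query, **criterias)  # AND() called with no parameters at all
except ValueError as err:
    print(err)  # -> "AND expects params"

# Possible guard: only build the AND() query when there is something to combine.
if rule_query or criterias:
    query = AND(**rule_query, **criterias)
else:
    query = "ALL"  # assumption: an "ALL" criteria string to fetch every message
```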
### Steps to reproduce
1. Create mail-account
2. Create mail rule for this account
3. Leave the "from", "to", "subject" and "body" fields empty and set "maximum_age" to 0
4. Set "delete" as action
5. Save
6. Wait for the mail task cron to see the error message
7. Adding any criterion or changing the action to "tag" produces no error
### Webserver logs
```bash
today at 22:30:00[2023-04-24 22:30:00,569] [ERROR] [paperless_mail] Rule XXX.YYY: Error while processing rule: AND expects params
today at 22:30:00Traceback (most recent call last):
today at 22:30:00 File "/usr/src/paperless/src/paperless_mail/mail.py", line 290, in handle_mail_account
today at 22:30:00 total_processed_files += self.handle_mail_rule(
today at 22:30:00 File "/usr/src/paperless/src/paperless_mail/mail.py", line 357, in handle_mail_rule
today at 22:30:00 criterias_imap = AND(**criterias)
today at 22:30:00 File "/usr/local/lib/python3.9/site-packages/imap_tools/query.py", line 97, in __init__
today at 22:30:00 raise ValueError('{} expects params'.format(self.__class__.__name__))
today at 22:30:00ValueError: AND expects params
```
### Browser logs
_No response_
### Paperless-ngx version
1.13.0
### Host OS
x86_64 on QNAP
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
_No response_
### Other
_No response_
| [
{
"content": "import datetime\nimport itertools\nimport logging\nimport os\nimport re\nimport tempfile\nimport traceback\nfrom datetime import date\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport magic\nimport pathvalidate\nfrom celery import chord\nfrom celery import shared_task\nfrom celery.canvas import Signature\nfrom django.conf import settings\nfrom django.db import DatabaseError\nfrom django.utils.timezone import is_naive\nfrom django.utils.timezone import make_aware\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.loggers import LoggingMixin\nfrom documents.models import Correspondent\nfrom documents.parsers import is_mime_type_supported\nfrom documents.tasks import consume_file\nfrom imap_tools import AND\nfrom imap_tools import MailBox\nfrom imap_tools import MailboxFolderSelectError\nfrom imap_tools import MailBoxUnencrypted\nfrom imap_tools import MailMessage\nfrom imap_tools import MailMessageFlags\nfrom imap_tools import NOT\nfrom imap_tools.mailbox import MailBoxTls\nfrom imap_tools.query import LogicOperator\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n# Apple Mail sets multiple IMAP KEYWORD and the general \"\\Flagged\" FLAG\n# imaplib => conn.fetch(b\"<message_id>\", \"FLAGS\")\n\n# no flag - (FLAGS (\\\\Seen $NotJunk NotJunk))'\n# red - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk))'\n# orange - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0))'\n# yellow - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1))'\n# blue - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit2))'\n# green - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit1))'\n# violet - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit2))'\n# grey - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1 $MailFlagBit2))'\n\nAPPLE_MAIL_TAG_COLORS = {\n \"red\": [],\n \"orange\": [\"$MailFlagBit0\"],\n \"yellow\": [\"$MailFlagBit1\"],\n \"blue\": [\"$MailFlagBit2\"],\n \"green\": [\"$MailFlagBit0\", \"$MailFlagBit1\"],\n \"violet\": [\"$MailFlagBit0\", \"$MailFlagBit2\"],\n \"grey\": [\"$MailFlagBit1\", \"$MailFlagBit2\"],\n}\n\n\nclass MailError(Exception):\n pass\n\n\nclass BaseMailAction:\n \"\"\"\n Base class for mail actions. 
A mail action is performed on a mail after\n consumption of the document is complete and is used to signal to the user\n that this mail was processed by paperless via the mail client.\n\n Furthermore, mail actions reduce the amount of mails to be analyzed by\n excluding mails on which the action was already performed (i.e., excluding\n read mails when the action is to mark mails as read).\n \"\"\"\n\n def get_criteria(self) -> Union[Dict, LogicOperator]:\n \"\"\"\n Returns filtering criteria/query for this mail action.\n \"\"\"\n return {}\n\n def post_consume(\n self,\n M: MailBox,\n message_uid: str,\n parameter: str,\n ): # pragma: nocover\n \"\"\"\n Perform mail action on the given mail uid in the mailbox.\n \"\"\"\n raise NotImplementedError\n\n\nclass DeleteMailAction(BaseMailAction):\n \"\"\"\n A mail action that deletes mails after processing.\n \"\"\"\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.delete(message_uid)\n\n\nclass MarkReadMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as read after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"seen\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.SEEN], True)\n\n\nclass MoveMailAction(BaseMailAction):\n \"\"\"\n A mail action that moves mails to a different folder after processing.\n \"\"\"\n\n def post_consume(self, M, message_uid, parameter):\n M.move(message_uid, parameter)\n\n\nclass FlagMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as important (\"star\") after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"flagged\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n\nclass TagMailAction(BaseMailAction):\n \"\"\"\n A mail action that tags mails after processing.\n \"\"\"\n\n def __init__(self, parameter):\n\n # The custom tag should look like \"apple:<color>\"\n if \"apple:\" in parameter.lower():\n\n _, self.color = parameter.split(\":\")\n self.color = self.color.strip()\n\n if self.color.lower() not in APPLE_MAIL_TAG_COLORS.keys():\n raise MailError(\"Not a valid AppleMail tag color.\")\n\n self.keyword = None\n\n else:\n self.keyword = parameter\n self.color = None\n\n def get_criteria(self):\n\n # AppleMail: We only need to check if mails are \\Flagged\n if self.color:\n return {\"flagged\": False}\n elif self.keyword:\n return AND(NOT(gmail_label=self.keyword), no_keyword=self.keyword)\n else: # pragma: nocover\n raise ValueError(\"This should never happen.\")\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n if re.search(r\"gmail\\.com$|googlemail\\.com$\", M._host):\n M.client.uid(\"STORE\", message_uid, \"+X-GM-LABELS\", self.keyword)\n\n # AppleMail\n elif self.color:\n\n # Remove all existing $MailFlagBits\n M.flag(\n message_uid,\n set(itertools.chain(*APPLE_MAIL_TAG_COLORS.values())),\n False,\n )\n\n # Set new $MailFlagBits\n M.flag(message_uid, APPLE_MAIL_TAG_COLORS.get(self.color), True)\n\n # Set the general \\Flagged\n # This defaults to the \"red\" flag in AppleMail and\n # \"stars\" in Thunderbird or GMail\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n elif self.keyword:\n M.flag(message_uid, [self.keyword], True)\n\n else:\n raise MailError(\"No keyword specified.\")\n\n\ndef mailbox_login(mailbox: MailBox, account: MailAccount):\n logger = logging.getLogger(\"paperless_mail\")\n\n try:\n\n if 
account.is_token:\n mailbox.xoauth2(account.username, account.password)\n else:\n try:\n _ = account.password.encode(\"ascii\")\n use_ascii_login = True\n except UnicodeEncodeError:\n use_ascii_login = False\n\n if use_ascii_login:\n mailbox.login(account.username, account.password)\n else:\n logger.debug(\"Falling back to AUTH=PLAIN\")\n mailbox.login_utf8(account.username, account.password)\n\n except Exception as e:\n logger.error(\n f\"Error while authenticating account {account}: {e}\",\n exc_info=False,\n )\n raise MailError(\n f\"Error while authenticating account {account}\",\n ) from e\n\n\n@shared_task\ndef apply_mail_action(\n result: List[str],\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n This shared task applies the mail action of a particular mail rule to the\n given mail. Creates a ProcessedMail object, so that the mail won't be\n processed in the future.\n \"\"\"\n\n rule = MailRule.objects.get(pk=rule_id)\n account = MailAccount.objects.get(pk=rule.account.pk)\n\n # Ensure the date is properly timezone aware\n if is_naive(message_date):\n message_date = make_aware(message_date)\n\n try:\n\n action = get_rule_action(rule)\n\n with get_mailbox(\n server=account.imap_server,\n port=account.imap_port,\n security=account.imap_security,\n ) as M:\n mailbox_login(M, account)\n M.folder.set(rule.folder)\n action.post_consume(M, message_uid, rule.action_parameter)\n\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"SUCCESS\",\n )\n\n except Exception:\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n raise\n\n\n@shared_task\ndef error_callback(\n request,\n exc,\n tb,\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n A shared task that is called whenever something goes wrong during\n consumption of a file. 
See queue_consumption_tasks.\n \"\"\"\n rule = MailRule.objects.get(pk=rule_id)\n\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n\n\ndef queue_consumption_tasks(\n *,\n consume_tasks: List[Signature],\n rule: MailRule,\n message: MailMessage,\n):\n \"\"\"\n Queue a list of consumption tasks (Signatures for the consume_file shared\n task) with celery.\n \"\"\"\n\n mail_action_task = apply_mail_action.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n )\n chord(header=consume_tasks, body=mail_action_task).on_error(\n error_callback.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n ),\n ).delay()\n\n\ndef get_rule_action(rule) -> BaseMailAction:\n \"\"\"\n Returns a BaseMailAction instance for the given rule.\n \"\"\"\n\n if rule.action == MailRule.MailAction.FLAG:\n return FlagMailAction()\n elif rule.action == MailRule.MailAction.DELETE:\n return DeleteMailAction()\n elif rule.action == MailRule.MailAction.MOVE:\n return MoveMailAction()\n elif rule.action == MailRule.MailAction.MARK_READ:\n return MarkReadMailAction()\n elif rule.action == MailRule.MailAction.TAG:\n return TagMailAction(rule.action_parameter)\n else:\n raise NotImplementedError(\"Unknown action.\") # pragma: nocover\n\n\ndef make_criterias(rule):\n \"\"\"\n Returns criteria to be applied to MailBox.fetch for the given rule.\n \"\"\"\n\n maximum_age = date.today() - timedelta(days=rule.maximum_age)\n criterias = {}\n if rule.maximum_age > 0:\n criterias[\"date_gte\"] = maximum_age\n if rule.filter_from:\n criterias[\"from_\"] = rule.filter_from\n if rule.filter_to:\n criterias[\"to\"] = rule.filter_to\n if rule.filter_subject:\n criterias[\"subject\"] = rule.filter_subject\n if rule.filter_body:\n criterias[\"body\"] = rule.filter_body\n\n rule_query = get_rule_action(rule).get_criteria()\n if isinstance(rule_query, dict):\n return AND(**rule_query, **criterias)\n else:\n return AND(rule_query, **criterias)\n\n\ndef get_mailbox(server, port, security) -> MailBox:\n \"\"\"\n Returns the correct MailBox instance for the given configuration.\n \"\"\"\n\n if security == MailAccount.ImapSecurity.NONE:\n mailbox = MailBoxUnencrypted(server, port)\n elif security == MailAccount.ImapSecurity.STARTTLS:\n mailbox = MailBoxTls(server, port)\n elif security == MailAccount.ImapSecurity.SSL:\n mailbox = MailBox(server, port)\n else:\n raise NotImplementedError(\"Unknown IMAP security\") # pragma: nocover\n return mailbox\n\n\nclass MailAccountHandler(LoggingMixin):\n \"\"\"\n The main class that handles mail accounts.\n\n * processes all rules for a given mail account\n * for each mail rule, fetches relevant mails, and queues documents from\n matching mails for consumption\n * marks processed mails in the database, so that they won't be processed\n again\n * runs mail actions on the mail server, when consumption is completed\n \"\"\"\n\n logging_name = \"paperless_mail\"\n\n def _correspondent_from_name(self, name):\n try:\n return Correspondent.objects.get_or_create(name=name)[0]\n except DatabaseError as e:\n self.log(\"error\", f\"Error while retrieving correspondent {name}: {e}\")\n return None\n\n def _get_title(self, message, att, rule):\n if rule.assign_title_from == MailRule.TitleSource.FROM_SUBJECT:\n return message.subject\n\n elif rule.assign_title_from 
== MailRule.TitleSource.FROM_FILENAME:\n return os.path.splitext(os.path.basename(att.filename))[0]\n\n else:\n raise NotImplementedError(\n \"Unknown title selector.\",\n ) # pragma: nocover\n\n def _get_correspondent(self, message: MailMessage, rule):\n c_from = rule.assign_correspondent_from\n\n if c_from == MailRule.CorrespondentSource.FROM_NOTHING:\n return None\n\n elif c_from == MailRule.CorrespondentSource.FROM_EMAIL:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_NAME:\n from_values = message.from_values\n if from_values is not None and len(from_values.name) > 0:\n return self._correspondent_from_name(from_values.name)\n else:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_CUSTOM:\n return rule.assign_correspondent\n\n else:\n raise NotImplementedError(\n \"Unknown correspondent selector\",\n ) # pragma: nocover\n\n def handle_mail_account(self, account: MailAccount):\n \"\"\"\n Main entry method to handle a specific mail account.\n \"\"\"\n\n self.renew_logging_group()\n\n self.log(\"debug\", f\"Processing mail account {account}\")\n\n total_processed_files = 0\n try:\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n supports_auth_plain = \"AUTH=PLAIN\" in M.client.capabilities\n\n self.log(\"debug\", f\"GMAIL Label Support: {supports_gmail_labels}\")\n self.log(\"debug\", f\"AUTH=PLAIN Support: {supports_auth_plain}\")\n\n mailbox_login(M, account)\n\n self.log(\n \"debug\",\n f\"Account {account}: Processing \"\n f\"{account.rules.count()} rule(s)\",\n )\n\n for rule in account.rules.order_by(\"order\"):\n try:\n total_processed_files += self._handle_mail_rule(\n M,\n rule,\n )\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while processing rule: {e}\",\n exc_info=True,\n )\n except MailError:\n raise\n except Exception as e:\n self.log(\n \"error\",\n f\"Error while retrieving mailbox {account}: {e}\",\n exc_info=False,\n )\n\n return total_processed_files\n\n def _handle_mail_rule(\n self,\n M: MailBox,\n rule: MailRule,\n ):\n\n self.log(\"debug\", f\"Rule {rule}: Selecting folder {rule.folder}\")\n\n try:\n M.folder.set(rule.folder)\n except MailboxFolderSelectError as err:\n\n self.log(\n \"error\",\n f\"Unable to access folder {rule.folder}, attempting folder listing\",\n )\n try:\n for folder_info in M.folder.list():\n self.log(\"info\", f\"Located folder: {folder_info.name}\")\n except Exception as e:\n self.log(\n \"error\",\n \"Exception during folder listing, unable to provide list folders: \"\n + str(e),\n )\n\n raise MailError(\n f\"Rule {rule}: Folder {rule.folder} \"\n f\"does not exist in account {rule.account}\",\n ) from err\n\n criterias = make_criterias(rule)\n\n self.log(\n \"debug\",\n f\"Rule {rule}: Searching folder with criteria {str(criterias)}\",\n )\n\n try:\n messages = M.fetch(\n criteria=criterias,\n mark_seen=False,\n charset=rule.account.character_set,\n )\n except Exception as err:\n raise MailError(\n f\"Rule {rule}: Error while fetching folder {rule.folder}\",\n ) from err\n\n mails_processed = 0\n total_processed_files = 0\n\n for message in messages:\n if ProcessedMail.objects.filter(\n rule=rule,\n uid=message.uid,\n folder=rule.folder,\n ).exists():\n self.log(\"debug\", f\"Skipping mail {message}, already processed.\")\n continue\n\n try:\n processed_files = 
self._handle_message(message, rule)\n\n total_processed_files += processed_files\n mails_processed += 1\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while processing mail {message.uid}: {e}\",\n exc_info=True,\n )\n\n self.log(\"debug\", f\"Rule {rule}: Processed {mails_processed} matching mail(s)\")\n\n return total_processed_files\n\n def _handle_message(self, message, rule: MailRule) -> int:\n processed_elements = 0\n\n # Skip Message handling when only attachments are to be processed but\n # message doesn't have any.\n if (\n not message.attachments\n and rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n ):\n return processed_elements\n\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Processing mail {message.subject} from {message.from_} with \"\n f\"{len(message.attachments)} attachment(s)\",\n )\n\n correspondent = self._get_correspondent(message, rule)\n tag_ids = [tag.id for tag in rule.assign_tags.all()]\n doc_type = rule.assign_document_type\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.EML_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_eml(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_attachments(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n return processed_elements\n\n def _process_attachments(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n processed_attachments = 0\n\n consume_tasks = list()\n\n for att in message.attachments:\n\n if (\n att.content_disposition != \"attachment\"\n and rule.attachment_type\n == MailRule.AttachmentProcessing.ATTACHMENTS_ONLY\n ):\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"with content disposition {att.content_disposition}\",\n )\n continue\n\n if rule.filter_attachment_filename and not fnmatch(\n att.filename.lower(),\n rule.filter_attachment_filename.lower(),\n ):\n # Force the filename and pattern to the lowercase\n # as this is system dependent otherwise\n continue\n\n title = self._get_title(message, att, rule)\n\n # don't trust the content type of the attachment. 
Could be\n # generic application/octet-stream.\n mime_type = magic.from_buffer(att.payload, mime=True)\n\n if is_mime_type_supported(mime_type):\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n )\n with open(temp_filename, \"wb\") as f:\n f.write(att.payload)\n\n self.log(\n \"info\",\n f\"Rule {rule}: \"\n f\"Consuming attachment {att.filename} from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=title,\n filename=pathvalidate.sanitize_filename(att.filename),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n consume_tasks.append(consume_task)\n\n processed_attachments += 1\n else:\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"since guessed mime type {mime_type} is not supported \"\n f\"by paperless\",\n )\n\n queue_consumption_tasks(\n consume_tasks=consume_tasks,\n rule=rule,\n message=message,\n )\n\n return processed_attachments\n\n def _process_eml(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n suffix=\".eml\",\n )\n with open(temp_filename, \"wb\") as f:\n # Move \"From\"-header to beginning of file\n # TODO: This ugly workaround is needed because the parser is\n # chosen only by the mime_type detected via magic\n # (see documents/consumer.py \"mime_type = magic.from_file\")\n # Unfortunately magic sometimes fails to detect the mime\n # type of .eml files correctly as message/rfc822 and instead\n # detects text/plain.\n # This also effects direct file consumption of .eml files\n # which are not treated with this workaround.\n from_element = None\n for i, header in enumerate(message.obj._headers):\n if header[0] == \"From\":\n from_element = i\n if from_element:\n new_headers = [message.obj._headers.pop(from_element)]\n new_headers += message.obj._headers\n message.obj._headers = new_headers\n\n f.write(message.obj.as_bytes())\n\n self.log(\n \"info\",\n f\"Rule {rule}: \"\n f\"Consuming eml from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=message.subject,\n filename=pathvalidate.sanitize_filename(f\"{message.subject}.eml\"),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n queue_consumption_tasks(\n consume_tasks=[consume_task],\n rule=rule,\n message=message,\n )\n\n processed_elements = 1\n return processed_elements\n",
"path": "src/paperless_mail/mail.py"
}
] | [
{
"content": "import datetime\nimport itertools\nimport logging\nimport os\nimport re\nimport tempfile\nimport traceback\nfrom datetime import date\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport magic\nimport pathvalidate\nfrom celery import chord\nfrom celery import shared_task\nfrom celery.canvas import Signature\nfrom django.conf import settings\nfrom django.db import DatabaseError\nfrom django.utils.timezone import is_naive\nfrom django.utils.timezone import make_aware\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.loggers import LoggingMixin\nfrom documents.models import Correspondent\nfrom documents.parsers import is_mime_type_supported\nfrom documents.tasks import consume_file\nfrom imap_tools import AND\nfrom imap_tools import MailBox\nfrom imap_tools import MailboxFolderSelectError\nfrom imap_tools import MailBoxUnencrypted\nfrom imap_tools import MailMessage\nfrom imap_tools import MailMessageFlags\nfrom imap_tools import NOT\nfrom imap_tools.mailbox import MailBoxTls\nfrom imap_tools.query import LogicOperator\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n# Apple Mail sets multiple IMAP KEYWORD and the general \"\\Flagged\" FLAG\n# imaplib => conn.fetch(b\"<message_id>\", \"FLAGS\")\n\n# no flag - (FLAGS (\\\\Seen $NotJunk NotJunk))'\n# red - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk))'\n# orange - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0))'\n# yellow - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1))'\n# blue - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit2))'\n# green - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit1))'\n# violet - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit2))'\n# grey - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1 $MailFlagBit2))'\n\nAPPLE_MAIL_TAG_COLORS = {\n \"red\": [],\n \"orange\": [\"$MailFlagBit0\"],\n \"yellow\": [\"$MailFlagBit1\"],\n \"blue\": [\"$MailFlagBit2\"],\n \"green\": [\"$MailFlagBit0\", \"$MailFlagBit1\"],\n \"violet\": [\"$MailFlagBit0\", \"$MailFlagBit2\"],\n \"grey\": [\"$MailFlagBit1\", \"$MailFlagBit2\"],\n}\n\n\nclass MailError(Exception):\n pass\n\n\nclass BaseMailAction:\n \"\"\"\n Base class for mail actions. 
A mail action is performed on a mail after\n consumption of the document is complete and is used to signal to the user\n that this mail was processed by paperless via the mail client.\n\n Furthermore, mail actions reduce the amount of mails to be analyzed by\n excluding mails on which the action was already performed (i.e., excluding\n read mails when the action is to mark mails as read).\n \"\"\"\n\n def get_criteria(self) -> Union[Dict, LogicOperator]:\n \"\"\"\n Returns filtering criteria/query for this mail action.\n \"\"\"\n return {}\n\n def post_consume(\n self,\n M: MailBox,\n message_uid: str,\n parameter: str,\n ): # pragma: nocover\n \"\"\"\n Perform mail action on the given mail uid in the mailbox.\n \"\"\"\n raise NotImplementedError\n\n\nclass DeleteMailAction(BaseMailAction):\n \"\"\"\n A mail action that deletes mails after processing.\n \"\"\"\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.delete(message_uid)\n\n\nclass MarkReadMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as read after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"seen\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.SEEN], True)\n\n\nclass MoveMailAction(BaseMailAction):\n \"\"\"\n A mail action that moves mails to a different folder after processing.\n \"\"\"\n\n def post_consume(self, M, message_uid, parameter):\n M.move(message_uid, parameter)\n\n\nclass FlagMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as important (\"star\") after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"flagged\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n\nclass TagMailAction(BaseMailAction):\n \"\"\"\n A mail action that tags mails after processing.\n \"\"\"\n\n def __init__(self, parameter):\n\n # The custom tag should look like \"apple:<color>\"\n if \"apple:\" in parameter.lower():\n\n _, self.color = parameter.split(\":\")\n self.color = self.color.strip()\n\n if self.color.lower() not in APPLE_MAIL_TAG_COLORS.keys():\n raise MailError(\"Not a valid AppleMail tag color.\")\n\n self.keyword = None\n\n else:\n self.keyword = parameter\n self.color = None\n\n def get_criteria(self):\n\n # AppleMail: We only need to check if mails are \\Flagged\n if self.color:\n return {\"flagged\": False}\n elif self.keyword:\n return AND(NOT(gmail_label=self.keyword), no_keyword=self.keyword)\n else: # pragma: nocover\n raise ValueError(\"This should never happen.\")\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n if re.search(r\"gmail\\.com$|googlemail\\.com$\", M._host):\n M.client.uid(\"STORE\", message_uid, \"+X-GM-LABELS\", self.keyword)\n\n # AppleMail\n elif self.color:\n\n # Remove all existing $MailFlagBits\n M.flag(\n message_uid,\n set(itertools.chain(*APPLE_MAIL_TAG_COLORS.values())),\n False,\n )\n\n # Set new $MailFlagBits\n M.flag(message_uid, APPLE_MAIL_TAG_COLORS.get(self.color), True)\n\n # Set the general \\Flagged\n # This defaults to the \"red\" flag in AppleMail and\n # \"stars\" in Thunderbird or GMail\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n elif self.keyword:\n M.flag(message_uid, [self.keyword], True)\n\n else:\n raise MailError(\"No keyword specified.\")\n\n\ndef mailbox_login(mailbox: MailBox, account: MailAccount):\n logger = logging.getLogger(\"paperless_mail\")\n\n try:\n\n if 
account.is_token:\n mailbox.xoauth2(account.username, account.password)\n else:\n try:\n _ = account.password.encode(\"ascii\")\n use_ascii_login = True\n except UnicodeEncodeError:\n use_ascii_login = False\n\n if use_ascii_login:\n mailbox.login(account.username, account.password)\n else:\n logger.debug(\"Falling back to AUTH=PLAIN\")\n mailbox.login_utf8(account.username, account.password)\n\n except Exception as e:\n logger.error(\n f\"Error while authenticating account {account}: {e}\",\n exc_info=False,\n )\n raise MailError(\n f\"Error while authenticating account {account}\",\n ) from e\n\n\n@shared_task\ndef apply_mail_action(\n result: List[str],\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n This shared task applies the mail action of a particular mail rule to the\n given mail. Creates a ProcessedMail object, so that the mail won't be\n processed in the future.\n \"\"\"\n\n rule = MailRule.objects.get(pk=rule_id)\n account = MailAccount.objects.get(pk=rule.account.pk)\n\n # Ensure the date is properly timezone aware\n if is_naive(message_date):\n message_date = make_aware(message_date)\n\n try:\n\n action = get_rule_action(rule)\n\n with get_mailbox(\n server=account.imap_server,\n port=account.imap_port,\n security=account.imap_security,\n ) as M:\n mailbox_login(M, account)\n M.folder.set(rule.folder)\n action.post_consume(M, message_uid, rule.action_parameter)\n\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"SUCCESS\",\n )\n\n except Exception:\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n raise\n\n\n@shared_task\ndef error_callback(\n request,\n exc,\n tb,\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n A shared task that is called whenever something goes wrong during\n consumption of a file. 
See queue_consumption_tasks.\n \"\"\"\n rule = MailRule.objects.get(pk=rule_id)\n\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n\n\ndef queue_consumption_tasks(\n *,\n consume_tasks: List[Signature],\n rule: MailRule,\n message: MailMessage,\n):\n \"\"\"\n Queue a list of consumption tasks (Signatures for the consume_file shared\n task) with celery.\n \"\"\"\n\n mail_action_task = apply_mail_action.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n )\n chord(header=consume_tasks, body=mail_action_task).on_error(\n error_callback.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n ),\n ).delay()\n\n\ndef get_rule_action(rule) -> BaseMailAction:\n \"\"\"\n Returns a BaseMailAction instance for the given rule.\n \"\"\"\n\n if rule.action == MailRule.MailAction.FLAG:\n return FlagMailAction()\n elif rule.action == MailRule.MailAction.DELETE:\n return DeleteMailAction()\n elif rule.action == MailRule.MailAction.MOVE:\n return MoveMailAction()\n elif rule.action == MailRule.MailAction.MARK_READ:\n return MarkReadMailAction()\n elif rule.action == MailRule.MailAction.TAG:\n return TagMailAction(rule.action_parameter)\n else:\n raise NotImplementedError(\"Unknown action.\") # pragma: nocover\n\n\ndef make_criterias(rule):\n \"\"\"\n Returns criteria to be applied to MailBox.fetch for the given rule.\n \"\"\"\n\n maximum_age = date.today() - timedelta(days=rule.maximum_age)\n criterias = {}\n if rule.maximum_age > 0:\n criterias[\"date_gte\"] = maximum_age\n if rule.filter_from:\n criterias[\"from_\"] = rule.filter_from\n if rule.filter_to:\n criterias[\"to\"] = rule.filter_to\n if rule.filter_subject:\n criterias[\"subject\"] = rule.filter_subject\n if rule.filter_body:\n criterias[\"body\"] = rule.filter_body\n\n rule_query = get_rule_action(rule).get_criteria()\n if isinstance(rule_query, dict):\n if len(rule_query) or len(criterias):\n return AND(**rule_query, **criterias)\n else:\n return AND(rule_query, **criterias)\n\n\ndef get_mailbox(server, port, security) -> MailBox:\n \"\"\"\n Returns the correct MailBox instance for the given configuration.\n \"\"\"\n\n if security == MailAccount.ImapSecurity.NONE:\n mailbox = MailBoxUnencrypted(server, port)\n elif security == MailAccount.ImapSecurity.STARTTLS:\n mailbox = MailBoxTls(server, port)\n elif security == MailAccount.ImapSecurity.SSL:\n mailbox = MailBox(server, port)\n else:\n raise NotImplementedError(\"Unknown IMAP security\") # pragma: nocover\n return mailbox\n\n\nclass MailAccountHandler(LoggingMixin):\n \"\"\"\n The main class that handles mail accounts.\n\n * processes all rules for a given mail account\n * for each mail rule, fetches relevant mails, and queues documents from\n matching mails for consumption\n * marks processed mails in the database, so that they won't be processed\n again\n * runs mail actions on the mail server, when consumption is completed\n \"\"\"\n\n logging_name = \"paperless_mail\"\n\n def _correspondent_from_name(self, name):\n try:\n return Correspondent.objects.get_or_create(name=name)[0]\n except DatabaseError as e:\n self.log(\"error\", f\"Error while retrieving correspondent {name}: {e}\")\n return None\n\n def _get_title(self, message, att, rule):\n if rule.assign_title_from == MailRule.TitleSource.FROM_SUBJECT:\n return 
message.subject\n\n elif rule.assign_title_from == MailRule.TitleSource.FROM_FILENAME:\n return os.path.splitext(os.path.basename(att.filename))[0]\n\n else:\n raise NotImplementedError(\n \"Unknown title selector.\",\n ) # pragma: nocover\n\n def _get_correspondent(self, message: MailMessage, rule):\n c_from = rule.assign_correspondent_from\n\n if c_from == MailRule.CorrespondentSource.FROM_NOTHING:\n return None\n\n elif c_from == MailRule.CorrespondentSource.FROM_EMAIL:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_NAME:\n from_values = message.from_values\n if from_values is not None and len(from_values.name) > 0:\n return self._correspondent_from_name(from_values.name)\n else:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_CUSTOM:\n return rule.assign_correspondent\n\n else:\n raise NotImplementedError(\n \"Unknown correspondent selector\",\n ) # pragma: nocover\n\n def handle_mail_account(self, account: MailAccount):\n \"\"\"\n Main entry method to handle a specific mail account.\n \"\"\"\n\n self.renew_logging_group()\n\n self.log(\"debug\", f\"Processing mail account {account}\")\n\n total_processed_files = 0\n try:\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n supports_auth_plain = \"AUTH=PLAIN\" in M.client.capabilities\n\n self.log(\"debug\", f\"GMAIL Label Support: {supports_gmail_labels}\")\n self.log(\"debug\", f\"AUTH=PLAIN Support: {supports_auth_plain}\")\n\n mailbox_login(M, account)\n\n self.log(\n \"debug\",\n f\"Account {account}: Processing \"\n f\"{account.rules.count()} rule(s)\",\n )\n\n for rule in account.rules.order_by(\"order\"):\n try:\n total_processed_files += self._handle_mail_rule(\n M,\n rule,\n )\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while processing rule: {e}\",\n exc_info=True,\n )\n except MailError:\n raise\n except Exception as e:\n self.log(\n \"error\",\n f\"Error while retrieving mailbox {account}: {e}\",\n exc_info=False,\n )\n\n return total_processed_files\n\n def _handle_mail_rule(\n self,\n M: MailBox,\n rule: MailRule,\n ):\n\n self.log(\"debug\", f\"Rule {rule}: Selecting folder {rule.folder}\")\n\n try:\n M.folder.set(rule.folder)\n except MailboxFolderSelectError as err:\n\n self.log(\n \"error\",\n f\"Unable to access folder {rule.folder}, attempting folder listing\",\n )\n try:\n for folder_info in M.folder.list():\n self.log(\"info\", f\"Located folder: {folder_info.name}\")\n except Exception as e:\n self.log(\n \"error\",\n \"Exception during folder listing, unable to provide list folders: \"\n + str(e),\n )\n\n raise MailError(\n f\"Rule {rule}: Folder {rule.folder} \"\n f\"does not exist in account {rule.account}\",\n ) from err\n\n criterias = make_criterias(rule)\n\n self.log(\n \"debug\",\n f\"Rule {rule}: Searching folder with criteria {str(criterias)}\",\n )\n\n try:\n messages = M.fetch(\n criteria=criterias,\n mark_seen=False,\n charset=rule.account.character_set,\n )\n except Exception as err:\n raise MailError(\n f\"Rule {rule}: Error while fetching folder {rule.folder}\",\n ) from err\n\n mails_processed = 0\n total_processed_files = 0\n\n for message in messages:\n if ProcessedMail.objects.filter(\n rule=rule,\n uid=message.uid,\n folder=rule.folder,\n ).exists():\n self.log(\"debug\", f\"Skipping mail {message}, already processed.\")\n 
continue\n\n try:\n processed_files = self._handle_message(message, rule)\n\n total_processed_files += processed_files\n mails_processed += 1\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while processing mail {message.uid}: {e}\",\n exc_info=True,\n )\n\n self.log(\"debug\", f\"Rule {rule}: Processed {mails_processed} matching mail(s)\")\n\n return total_processed_files\n\n def _handle_message(self, message, rule: MailRule) -> int:\n processed_elements = 0\n\n # Skip Message handling when only attachments are to be processed but\n # message doesn't have any.\n if (\n not message.attachments\n and rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n ):\n return processed_elements\n\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Processing mail {message.subject} from {message.from_} with \"\n f\"{len(message.attachments)} attachment(s)\",\n )\n\n correspondent = self._get_correspondent(message, rule)\n tag_ids = [tag.id for tag in rule.assign_tags.all()]\n doc_type = rule.assign_document_type\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.EML_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_eml(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_attachments(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n return processed_elements\n\n def _process_attachments(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n processed_attachments = 0\n\n consume_tasks = list()\n\n for att in message.attachments:\n\n if (\n att.content_disposition != \"attachment\"\n and rule.attachment_type\n == MailRule.AttachmentProcessing.ATTACHMENTS_ONLY\n ):\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"with content disposition {att.content_disposition}\",\n )\n continue\n\n if rule.filter_attachment_filename and not fnmatch(\n att.filename.lower(),\n rule.filter_attachment_filename.lower(),\n ):\n # Force the filename and pattern to the lowercase\n # as this is system dependent otherwise\n continue\n\n title = self._get_title(message, att, rule)\n\n # don't trust the content type of the attachment. 
Could be\n # generic application/octet-stream.\n mime_type = magic.from_buffer(att.payload, mime=True)\n\n if is_mime_type_supported(mime_type):\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n )\n with open(temp_filename, \"wb\") as f:\n f.write(att.payload)\n\n self.log(\n \"info\",\n f\"Rule {rule}: \"\n f\"Consuming attachment {att.filename} from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=title,\n filename=pathvalidate.sanitize_filename(att.filename),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n consume_tasks.append(consume_task)\n\n processed_attachments += 1\n else:\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"since guessed mime type {mime_type} is not supported \"\n f\"by paperless\",\n )\n\n queue_consumption_tasks(\n consume_tasks=consume_tasks,\n rule=rule,\n message=message,\n )\n\n return processed_attachments\n\n def _process_eml(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n suffix=\".eml\",\n )\n with open(temp_filename, \"wb\") as f:\n # Move \"From\"-header to beginning of file\n # TODO: This ugly workaround is needed because the parser is\n # chosen only by the mime_type detected via magic\n # (see documents/consumer.py \"mime_type = magic.from_file\")\n # Unfortunately magic sometimes fails to detect the mime\n # type of .eml files correctly as message/rfc822 and instead\n # detects text/plain.\n # This also effects direct file consumption of .eml files\n # which are not treated with this workaround.\n from_element = None\n for i, header in enumerate(message.obj._headers):\n if header[0] == \"From\":\n from_element = i\n if from_element:\n new_headers = [message.obj._headers.pop(from_element)]\n new_headers += message.obj._headers\n message.obj._headers = new_headers\n\n f.write(message.obj.as_bytes())\n\n self.log(\n \"info\",\n f\"Rule {rule}: \"\n f\"Consuming eml from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=message.subject,\n filename=pathvalidate.sanitize_filename(f\"{message.subject}.eml\"),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n queue_consumption_tasks(\n consume_tasks=[consume_task],\n rule=rule,\n message=message,\n )\n\n processed_elements = 1\n return processed_elements\n",
"path": "src/paperless_mail/mail.py"
}
] | diff --git a/src/paperless_mail/mail.py b/src/paperless_mail/mail.py
index d792b5a9776..1014e4035fa 100644
--- a/src/paperless_mail/mail.py
+++ b/src/paperless_mail/mail.py
@@ -381,7 +381,8 @@ def make_criterias(rule):
rule_query = get_rule_action(rule).get_criteria()
if isinstance(rule_query, dict):
- return AND(**rule_query, **criterias)
+ if len(rule_query) or len(criterias):
+ return AND(**rule_query, **criterias)
else:
return AND(rule_query, **criterias)
diff --git a/src/paperless_mail/tests/test_mail.py b/src/paperless_mail/tests/test_mail.py
index 1f482f3367c..4ad79563b78 100644
--- a/src/paperless_mail/tests/test_mail.py
+++ b/src/paperless_mail/tests/test_mail.py
@@ -612,6 +612,29 @@ def test_handle_mail_account_delete(self):
self.assertEqual(len(self.bogus_mailbox.messages), 1)
+ def test_handle_mail_account_delete_no_filters(self):
+
+ account = MailAccount.objects.create(
+ name="test",
+ imap_server="",
+ username="admin",
+ password="secret",
+ )
+
+ _ = MailRule.objects.create(
+ name="testrule",
+ account=account,
+ action=MailRule.MailAction.DELETE,
+ maximum_age=0,
+ )
+
+ self.assertEqual(len(self.bogus_mailbox.messages), 3)
+
+ self.mail_account_handler.handle_mail_account(account)
+ self.apply_mail_actions()
+
+ self.assertEqual(len(self.bogus_mailbox.messages), 0)
+
def test_handle_mail_account_flag(self):
account = MailAccount.objects.create(
name="test",
|
learningequality__kolibri-10078 | Kolibri 0.16 - Resources of type HTML5 and exercises are not displayed
## Observed behavior
This is a follow-up to https://github.com/learningequality/kolibri/pull/9724#issuecomment-1408889097
In the latest develop build, both exercises and HTML5 resources are not displayed when a user navigates through the Library.
## Expected behavior
It should be possible to preview the resource.
## Steps to reproduce the issue
1. Install the following [0.16 build](https://buildkite.com/learningequality/kolibri-debian/builds/5813#018603a8-a7d9-4c79-98d0-e2a0db6a7c69) and import the QA channel.
2. Go to Library > QA Channel
3. Click on any resource within the HTML5 folder or the Exercises folder
## Videos
HTML5:
https://user-images.githubusercontent.com/79847249/215529161-a0e88738-b221-416a-beea-cf0c6192450f.mp4
EXERCISES:
https://user-images.githubusercontent.com/79847249/215529190-28ecdf59-db72-4b3a-a6df-2c72ab2f395c.mp4
## Console error
```
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.learn.app registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.media_player.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.pdf_viewer.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.epub_viewer.main registered
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.html5_viewer.main registered
vue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)
insertBefore @ vue.runtime.esm.js:5753
insert @ vue.runtime.esm.js:6083
(anonymous) @ vue.runtime.esm.js:6030
createElm @ vue.runtime.esm.js:5969
(anonymous) @ vue.runtime.esm.js:6560
Vue._update @ vue.runtime.esm.js:3963
updateComponent @ vue.runtime.esm.js:4081
Watcher.get @ vue.runtime.esm.js:4495
Watcher.run @ vue.runtime.esm.js:4570
flushSchedulerQueue @ vue.runtime.esm.js:4326
(anonymous) @ vue.runtime.esm.js:1989
flushCallbacks @ vue.runtime.esm.js:1915
Promise.then (async)
timerFunc @ vue.runtime.esm.js:1942
nextTick @ vue.runtime.esm.js:1999
(anonymous) @ vue.runtime.esm.js:4418
Watcher.update @ vue.runtime.esm.js:4560
Vue.$forceUpdate @ vue.runtime.esm.js:3984
forceRender @ vue.runtime.esm.js:3668
(anonymous) @ vue.runtime.esm.js:3690
(anonymous) @ vue.runtime.esm.js:336
vue.runtime.esm.js:5753 GET http://127.0.0.1:51957/content/static/hashi/hashi-0efeb19f7e4ded20c73f.html 404 (Not Found)
insertBefore @ vue.runtime.esm.js:5753
insert @ vue.runtime.esm.js:6083
(anonymous) @ vue.runtime.esm.js:6030
createElm @ vue.runtime.esm.js:5969
(anonymous) @ vue.runtime.esm.js:6260
patchVnode @ vue.runtime.esm.js:6363
(anonymous) @ vue.runtime.esm.js:6526
Vue._update @ vue.runtime.esm.js:3963
updateComponent @ vue.runtime.esm.js:4081
Watcher.get @ vue.runtime.esm.js:4495
Watcher.run @ vue.runtime.esm.js:4570
flushSchedulerQueue @ vue.runtime.esm.js:4326
(anonymous) @ vue.runtime.esm.js:1989
flushCallbacks @ vue.runtime.esm.js:1915
Promise.then (async)
timerFunc @ vue.runtime.esm.js:1942
nextTick @ vue.runtime.esm.js:1999
(anonymous) @ vue.runtime.esm.js:4418
Watcher.update @ vue.runtime.esm.js:4560
Dep.notify @ vue.runtime.esm.js:730
set @ vue.runtime.esm.js:1055
sharedPropertyDefinition.set @ vue.runtime.esm.js:4644
(anonymous) @ ContentPage.vue:312
pluginMediator.js:122 Kolibri Modules: kolibri.plugins.perseus_viewer.main registered
```
## Usage Details
Windows 10, Ubuntu - Chrome, Firefox
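
The hashi 404 above comes from the alternate zip-content origin, so one plausible reading (an assumption, not something stated in the report) is that the static-file wrapper there expects `app_paths` to be a *list* of URL prefixes; the patch further down wraps `paths.get_zip_content_base_path()` in a list for exactly that reason. A minimal, self-contained Python sketch of why passing a bare string where a list of prefixes is expected goes wrong — iterating a string yields single characters, so the prefix check matches far too much:

```python
def is_app_path(request_path: str, app_paths) -> bool:
    # Intended contract (assumed): app_paths is a list of URL prefixes.
    return any(request_path.startswith(prefix) for prefix in app_paths)

# Correct usage: only zip-content requests are routed to the app.
print(is_app_path("/static/hashi/hashi.html", ["/zipcontent/"]))    # False
print(is_app_path("/zipcontent/abc/index.html", ["/zipcontent/"]))  # True

# Bare string: iteration yields "/", "z", "i", ... so every path that
# starts with "/" is treated as an app path and never served statically.
print(is_app_path("/static/hashi/hashi.html", "/zipcontent/"))      # True (wrong)
```

This is only an illustration of the string-vs-list pitfall, not the actual `DynamicWhiteNoise` implementation.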
| [
{
"content": "\"\"\"\nWSGI config for the alternate origin server used for serving\nsandboxed content\n\"\"\"\nimport os\n\nimport kolibri.core.content\nfrom kolibri.core.content.utils import paths\nfrom kolibri.core.content.zip_wsgi import get_application\nfrom kolibri.utils.kolibri_whitenoise import DynamicWhiteNoise\n\nos.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\", \"kolibri.deployment.default.settings.base\"\n)\n\n\ndef generate_alt_wsgi_application():\n alt_content_path = \"/\" + paths.get_content_url(\n paths.zip_content_path_prefix()\n ).lstrip(\"/\")\n\n content_dirs = [paths.get_content_dir_path()] + paths.get_content_fallback_paths()\n\n content_static_path = os.path.join(\n os.path.dirname(kolibri.core.content.__file__), \"static\"\n )\n\n # Mount static files\n return DynamicWhiteNoise(\n get_application(),\n dynamic_locations=[\n (alt_content_path, content_dir) for content_dir in content_dirs\n ]\n + [(paths.zip_content_static_root(), content_static_path)],\n app_paths=paths.get_zip_content_base_path(),\n )\n\n\nalt_application = generate_alt_wsgi_application()\n",
"path": "kolibri/deployment/default/alt_wsgi.py"
}
] | [
{
"content": "\"\"\"\nWSGI config for the alternate origin server used for serving\nsandboxed content\n\"\"\"\nimport os\n\nimport kolibri.core.content\nfrom kolibri.core.content.utils import paths\nfrom kolibri.core.content.zip_wsgi import get_application\nfrom kolibri.utils.kolibri_whitenoise import DynamicWhiteNoise\n\nos.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\", \"kolibri.deployment.default.settings.base\"\n)\n\n\ndef generate_alt_wsgi_application():\n alt_content_path = \"/\" + paths.get_content_url(\n paths.zip_content_path_prefix()\n ).lstrip(\"/\")\n\n content_dirs = [paths.get_content_dir_path()] + paths.get_content_fallback_paths()\n\n content_static_path = os.path.join(\n os.path.dirname(kolibri.core.content.__file__), \"static\"\n )\n\n # Mount static files\n return DynamicWhiteNoise(\n get_application(),\n dynamic_locations=[\n (alt_content_path, content_dir) for content_dir in content_dirs\n ]\n + [(paths.zip_content_static_root(), content_static_path)],\n app_paths=[paths.get_zip_content_base_path()],\n )\n\n\nalt_application = generate_alt_wsgi_application()\n",
"path": "kolibri/deployment/default/alt_wsgi.py"
}
] | diff --git a/kolibri/deployment/default/alt_wsgi.py b/kolibri/deployment/default/alt_wsgi.py
index b26d3f582ea..3b36a1469b5 100644
--- a/kolibri/deployment/default/alt_wsgi.py
+++ b/kolibri/deployment/default/alt_wsgi.py
@@ -32,7 +32,7 @@ def generate_alt_wsgi_application():
(alt_content_path, content_dir) for content_dir in content_dirs
]
+ [(paths.zip_content_static_root(), content_static_path)],
- app_paths=paths.get_zip_content_base_path(),
+ app_paths=[paths.get_zip_content_base_path()],
)
diff --git a/kolibri/plugins/learn/assets/src/views/ContentPage.vue b/kolibri/plugins/learn/assets/src/views/ContentPage.vue
index 8acc215d9bb..83e51a1fdc4 100644
--- a/kolibri/plugins/learn/assets/src/views/ContentPage.vue
+++ b/kolibri/plugins/learn/assets/src/views/ContentPage.vue
@@ -4,7 +4,7 @@
<template v-if="sessionReady">
<KContentRenderer
- v-if="!content.assessment"
+ v-if="!content.assessmentmetadata"
class="content-renderer"
:kind="content.kind"
:lang="content.lang"
@@ -54,9 +54,9 @@
:kind="content.kind"
:files="content.files"
:lang="content.lang"
- :randomize="content.randomize"
- :masteryModel="content.masteryModel"
- :assessmentIds="content.assessmentIds"
+ :randomize="content.assessmentmetadata.randomize"
+ :masteryModel="content.assessmentmetadata.mastery_model"
+ :assessmentIds="content.assessmentmetadata.assessment_item_ids"
:available="content.available"
:extraFields="extra_fields"
:progress="progress"
diff --git a/kolibri/plugins/learn/assets/src/views/TopicsContentPage.vue b/kolibri/plugins/learn/assets/src/views/TopicsContentPage.vue
index 87d5dd35fe3..ee38be1a418 100644
--- a/kolibri/plugins/learn/assets/src/views/TopicsContentPage.vue
+++ b/kolibri/plugins/learn/assets/src/views/TopicsContentPage.vue
@@ -60,7 +60,9 @@
data-test="contentPage"
:content="content"
:lessonId="lessonId"
- :style="{ backgroundColor: ( content.assessment ? '' : $themeTokens.textInverted ) }"
+ :style="{
+ backgroundColor: ( content.assessmentmetadata ? '' : $themeTokens.textInverted )
+ }"
:allowMarkComplete="allowMarkComplete"
@mounted="contentPageMounted = true"
@finished="$refs.activityBar && $refs.activityBar.animateNextSteps()"
|
docker__docker-py-3004 | installing latest 5.0.3 on windows machines is still using pywin32==227 but not pywin32==301
[Bump pywin32 from 227 to 301](https://github.com/docker/docker-py/commit/e0d186d754693feb7d27c2352e455c5febb4a5cd) was already merged to bump pywin32 from 227 to 301, but installing the latest 5.0.3 on Windows machines still results in pywin32==227 being installed.
Most likely extras_require needs to be updated
https://github.com/docker/docker-py/blob/a48a5a9647761406d66e8271f19fab7fa0c5f582/setup.py#L19
Pywin32 upgrade
Fix issue #2902
@aiordache @ulyssessouza, please accept this PR to fix this annoying bug.
Don't pin to pywin32 227
The hard pin to 227 is keeping us from using docker with other projects that depend on a newer version of pywin32.
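
A minimal sketch of the change implied above — relaxing the environment-marker pin in `setup.py` so newer pywin32 releases are allowed. The `>=304` floor mirrors the patched file shown below in this record; the exact minimum version is a project decision, not something stated in the issue:

```python
# setup.py (excerpt): allow newer pywin32 releases instead of hard-pinning 227
extras_require = {
    # win32 APIs if on Windows (required for npipe support)
    ':sys_platform == "win32"': 'pywin32>=304',
    # (tls/ssh extras unchanged)
}
```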
| [
{
"content": "#!/usr/bin/env python\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32==227',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.3'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Ulysses Souza',\n maintainer_email='[email protected]',\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32>=304',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.3'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Ulysses Souza',\n maintainer_email='[email protected]',\n)\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index a0eb53198..c74d8cea2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ paramiko==2.10.1
pycparser==2.17
pyOpenSSL==18.0.0
pyparsing==2.2.0
-pywin32==301; sys_platform == 'win32'
+pywin32==304; sys_platform == 'win32'
requests==2.26.0
urllib3==1.26.5
websocket-client==0.56.0
diff --git a/setup.py b/setup.py
index db2d6ebc4..3be63ba65 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
extras_require = {
# win32 APIs if on Windows (required for npipe support)
- ':sys_platform == "win32"': 'pywin32==227',
+ ':sys_platform == "win32"': 'pywin32>=304',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
|
apluslms__a-plus-1299 | Binaryornot library sometimes incorrectly classifies PDF files as non-binary
When large PDF files are detected as non-binary data, the inspect submission page tries to render them, which causes the page to load slowly.
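
One possible mitigation — sketched here as an assumption, since the report does not spell out the fix — is to stop relying on `binaryornot` alone for types that are known to be binary, and to short-circuit on the guessed mime type before falling back to content sniffing:

```python
from mimetypes import guess_type

from binaryornot.check import is_binary

# Hypothetical helper (illustration only, not the actual A+ change):
# files whose mime type is known to be binary are never handed to the
# inline text renderer, regardless of what binaryornot reports.
KNOWN_BINARY_MIME_TYPES = {"application/pdf", "application/zip"}


def looks_binary(path: str) -> bool:
    mime, _ = guess_type(path)
    if mime in KNOWN_BINARY_MIME_TYPES:
        return True
    return is_binary(path)
```

The `SubmittedFile` model in this record already exposes `get_mime()` via `mimetypes.guess_type`, so the inspect view has the information it needs to skip rendering large PDFs.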
| [
{
"content": "import itertools\nimport json\nimport logging\nfrom mimetypes import guess_type\nimport os\nfrom typing import IO, Dict, Iterable, List, Tuple, TYPE_CHECKING, Callable\nfrom urllib.parse import urlparse\n\nfrom binaryornot.check import is_binary\nfrom django.conf import settings\nfrom django.db import models, DatabaseError\nfrom django.db.models import F\nfrom django.db.models.signals import post_delete\nfrom django.http.request import HttpRequest\nfrom django.utils import timezone\nfrom django.utils.translation import get_language, gettext_lazy as _\n\nfrom exercise.protocol.exercise_page import ExercisePage\nfrom authorization.models import JWTAccessible\nfrom authorization.object_permissions import register_jwt_accessible_class\nfrom lib.fields import DefaultForeignKey, JSONField, PercentField\nfrom lib.helpers import (\n get_random_string,\n query_dict_to_list_of_tuples,\n pairs_to_dict,\n safe_file_name,\n Enum,\n)\nfrom lib.localization_syntax import pick_localized\nfrom lib.models import UrlMixin\nfrom lti_tool.utils import has_lti_access_to_course\nfrom userprofile.models import UserProfile\nfrom aplus.celery import retry_submissions\nfrom . import exercise_models\nfrom .exercise_models import LearningObjectProto\n\nlogger = logging.getLogger('aplus.exercise')\n\n\nclass SubmissionQuerySet(models.QuerySet):\n def passes(self) -> \"SubmissionQuerySet\":\n \"\"\"Filter only submissions that pass the exercise\"\"\"\n return self.filter(grade__gte=F(\"exercise__points_to_pass\"))\n\n def exclude_errors(self):\n return self.exclude(status__in=(\n Submission.STATUS.ERROR,\n Submission.STATUS.REJECTED,\n ))\n\n def exclude_unofficial(self):\n return self.exclude(status=Submission.STATUS.UNOFFICIAL)\n\n def annotate_submitter_points(\n self,\n field_name: str = 'total',\n revealed_ids: Iterable[int] = None,\n include_unofficial: bool = False,\n ) -> 'SubmissionQuerySet':\n \"\"\"\n Annotates the total points earned by the submitter in the exercise to\n the queryset. Chain after `values` and before `order_by` to ensure one\n points row per submitter and exercise.\n\n The result will be assigned to a field named by `field_name`.\n\n Provide `revealed_ids`, if you want to hide unrevealed points from the\n queryset.\n\n If `include_unofficial` is `False`, only the submissions with status\n `READY` are included. Otherwise, `READY` and `UNOFFICIAL` are included.\n \"\"\"\n # Building a case expression for calculating the total points. 
There\n # are 4 cases:\n # 1) If revealed_ids was provided, and the exercise id is not in it,\n # return 0.\n # 2) If a submission has the force_exercise_points flag set to True,\n # return that submission's points.\n # 3) If the grading_mode field of the exercise is set to LAST, return\n # the points of the latest submission.\n # 4) In any other case, return the points of the best submission.\n # If none of the submissions are in an expected status (READY or\n # UNOFFICIAL, depending on the include_unofficial parameter, return 0).\n force_zero = False\n cases = []\n if include_unofficial:\n statuses = (Submission.STATUS.READY, Submission.STATUS.UNOFFICIAL)\n else:\n statuses = (Submission.STATUS.READY,)\n if revealed_ids is not None:\n # revealed_ids may be an empty set.\n if revealed_ids:\n # This When clause crashes if the revealed_ids set is empty.\n cases.append(\n models.When(\n ~models.Q(exercise__in=revealed_ids),\n then=0,\n )\n )\n else:\n # No exercise is revealed, thus always return grade zero.\n force_zero = True\n cases.append(\n models.When(\n forced_points__isnull=False,\n then=models.F('forced_points'),\n )\n )\n cases.append(\n models.When(\n exercise__grading_mode=exercise_models.BaseExercise.GRADING_MODE.LAST,\n then=models.Subquery(\n self.filter(\n exercise=models.OuterRef('exercise_id'),\n submitters=models.OuterRef('submitters__id'),\n status__in=statuses,\n )\n .order_by('-submission_time')\n .values('grade')[:1]\n ),\n )\n )\n return (\n self.alias(\n forced_points=models.Max('grade', filter=models.Q(force_exercise_points=True)),\n )\n .annotate(**{\n # Coalesce ensures that 0 is returned instead of None, if none\n # of the submissions are in an expected status.\n field_name: models.functions.Coalesce(\n models.Case(\n *cases,\n default=models.Max(\n 'grade',\n filter=models.Q(\n status__in=statuses,\n ),\n ),\n ),\n 0,\n ) if not force_zero else models.Value(0),\n })\n )\n\n def annotate_best_submitter_points(\n self,\n field_name: str = 'total',\n revealed_ids: Iterable[int] = None,\n include_unofficial: bool = False,\n ) -> 'SubmissionQuerySet':\n \"\"\"\n Annotates the total points earned by the submitter in the exercise to\n the queryset. Chain after `values` and before `order_by` to ensure one\n points row per submitter and exercise.\n\n The result will be assigned to a field named by `field_name`.\n\n Provide `revealed_ids`, if you want to hide unrevealed points from the\n queryset.\n\n If `include_unofficial` is `False`, only the submissions with status\n `READY` are included. Otherwise, `READY` and `UNOFFICIAL` are included.\n\n This method performs better than `annotate_submitter_points()`, but\n this method ignores the exercise grading mode LAST. This method assumes\n that exercise grading mode BEST is always used. This is a hacky and\n temporary workaround for database performance issues.\n \"\"\"\n # Building a case expression for calculating the total points. 
There\n # are 3 cases:\n # 1) If revealed_ids was provided, and the exercise id is not in it,\n # return 0.\n # 2) If a submission has the force_exercise_points flag set to True,\n # return that submission's points.\n # 3) In any other case, return the points of the best submission.\n # If none of the submissions are in an expected status (READY or\n # UNOFFICIAL, depending on the include_unofficial parameter, return 0).\n force_zero = False\n cases = []\n if include_unofficial:\n statuses = (Submission.STATUS.READY, Submission.STATUS.UNOFFICIAL)\n else:\n statuses = (Submission.STATUS.READY,)\n if revealed_ids is not None:\n # revealed_ids may be an empty set.\n if revealed_ids:\n # This When clause crashes if the revealed_ids set is empty.\n cases.append(\n models.When(\n ~models.Q(exercise__in=revealed_ids),\n then=0,\n )\n )\n else:\n # No exercise is revealed, thus always return grade zero.\n force_zero = True\n cases.append(\n models.When(\n forced_points__isnull=False,\n then=models.F('forced_points'),\n )\n )\n return (\n self.alias(\n forced_points=models.Max('grade', filter=models.Q(force_exercise_points=True)),\n )\n .annotate(**{\n # Coalesce ensures that 0 is returned instead of None, if none\n # of the submissions are in an expected status.\n field_name: models.functions.Coalesce(\n models.Case(\n *cases,\n default=models.Max(\n 'grade',\n filter=models.Q(\n status__in=statuses,\n ),\n ),\n ),\n 0,\n ) if not force_zero else models.Value(0),\n })\n )\n\n def defer_text_fields(self):\n return self.defer(\n 'feedback',\n 'assistant_feedback',\n 'grading_data',\n 'submission_data',\n 'meta_data',\n )\n\n\nclass SubmissionManager(JWTAccessible[\"Submission\"], models.Manager):\n _queryset_class = SubmissionQuerySet\n\n # Hints the correct return type for .filter(...)\n filter: Callable[..., SubmissionQuerySet]\n\n def get_queryset(self):\n return super().get_queryset()\\\n .prefetch_related('submitters')\n\n def create_from_post(self, exercise, submitters, request):\n\n submission_data_list = [\n (key, value) for (key, value) in query_dict_to_list_of_tuples(request.POST)\n if key != '__aplus__'\n ]\n try:\n meta_data_dict = json.loads(request.POST.get('__aplus__', '{}'))\n except json.JSONDecodeError as exc:\n raise ValueError(\"The content of the field __aplus__ is not valid json\") from exc\n if 'lang' not in meta_data_dict:\n meta_data_dict['lang'] = get_language()\n\n try:\n if ('lti-launch-id' in request.session\n and has_lti_access_to_course(request, None, exercise.course_instance)):\n meta_data_dict['lti-launch-id'] = request.session.get('lti-launch-id')\n if 'lti1p3-session-id' in request.COOKIES:\n meta_data_dict['lti-session-id'] = request.COOKIES.get('lti1p3-session-id')\n\n new_submission = Submission.objects.create(\n exercise=exercise,\n submission_data=submission_data_list,\n meta_data=meta_data_dict,\n )\n new_submission.submitters.set(submitters)\n except DatabaseError as error:\n logger.exception(\"Failed to create submission: %s %s\",\n request.user.username, exercise);\n raise DatabaseError from error\n try:\n new_submission.add_files(request.FILES)\n except DatabaseError as error:\n logger.exception(\"Failed to save submitted files: %s %s\",\n request.user.username, exercise);\n new_submission.delete()\n raise DatabaseError from error\n return new_submission\n\n def exclude_errors(self):\n return self.exclude(status__in=(\n Submission.STATUS.ERROR,\n Submission.STATUS.REJECTED,\n ))\n\n def exclude_unofficial(self):\n return 
self.exclude(status=Submission.STATUS.UNOFFICIAL)\n\n def get_combined_enrollment_submission_data(self, user):\n \"\"\"Retrieve the user's submissions to enrollment exercises and combine\n their submission data into a single dictionary.\n The most recent value (based on submission time) is used for data keys\n that are present in multiple submissions.\n\n The values in the returned dictionary are lists since some form inputs\n accept multiple values (e.g., checkboxes). (The original submission_data\n is stored as a list of key-value pairs, but multiple pairs may repeat\n the same key.)\n \"\"\"\n submissions = Submission.objects.filter(\n exercise__status__in=(\n exercise_models.LearningObject.STATUS.ENROLLMENT,\n exercise_models.LearningObject.STATUS.ENROLLMENT_EXTERNAL\n ),\n submitters__user__id=user.id\n ).order_by('submission_time').only('submission_data')[:10]\n # Retrieve the ten latest submissions since older submissions likely\n # do not have any useful data.\n enrollment_data = {}\n # pylint: disable-next=unnecessary-lambda-assignment\n keyfunc = lambda t: t[0] # the key in a key-value pair\n for sbms in submissions:\n # submission_data should be a list of key-value pairs, but\n # nothing guarantees it in the database level.\n # Checkbox inputs may produce multiple values for the same key, thus\n # the list of pairs may use the same key in different pairs.\n # For each submission, group the submission data by the keys so that\n # multiple values can be preserved for a key when all submissions\n # are combined.\n single_sbms_grouped_data = {} # dict maps keys to the list of one or more values\n try:\n for key, pairs in itertools.groupby(\n sorted(sbms.submission_data, key=keyfunc),\n key=keyfunc):\n single_sbms_grouped_data[key] = [val for k, val in pairs]\n\n # Update the combined enrollment submission data.\n # Later submissions overwrite previous values for the same keys.\n # The keys are combined from many submissions, but the value list\n # for one key always originates from one submission.\n enrollment_data.update(single_sbms_grouped_data)\n except Exception:\n # submission_data was not a list of pairs\n pass\n return enrollment_data\n\nclass SubmissionProto(UrlMixin):\n ABSOLUTE_URL_NAME = \"submission\"\n id: int\n exercise: LearningObjectProto\n\n def get_url_kwargs(self):\n return {\"submission_id\": self.id, **self.exercise.get_url_kwargs()}\n\n def get_inspect_url(self):\n return self.get_url(\"submission-inspect\")\n\n\n@register_jwt_accessible_class(\"submission\")\nclass Submission(SubmissionProto, models.Model):\n \"\"\"\n A submission to some course exercise from one or more submitters.\n \"\"\"\n STATUS = Enum([\n ('INITIALIZED', 'initialized', _('STATUS_INITIALIZED')),\n ('WAITING', 'waiting', _('STATUS_WAITING')),\n ('READY', 'ready', _('STATUS_READY')), # graded normally\n ('ERROR', 'error', _('STATUS_ERROR')),\n ('REJECTED', 'rejected', _('STATUS_REJECTED')), # missing fields etc\n ('UNOFFICIAL', 'unofficial', _('STATUS_UNOFFICIAL')),\n # unofficial: graded after the deadline or after exceeding the submission limit\n ])\n submission_time = models.DateTimeField(\n verbose_name=_('LABEL_SUBMISSION_TIME'),\n auto_now_add=True,\n )\n hash = models.CharField(\n verbose_name=_('LABEL_HASH'),\n max_length=32,\n default=get_random_string,\n )\n\n # Relations\n exercise: exercise_models.BaseExercise = DefaultForeignKey(exercise_models.BaseExercise, # type: ignore\n verbose_name=_('LABEL_EXERCISE'),\n on_delete=models.CASCADE,\n related_name=\"submissions\")\n 
submitters = models.ManyToManyField(UserProfile,\n verbose_name=_('LABEL_SUBMITTERS'),\n related_name=\"submissions\")\n grader = models.ForeignKey(UserProfile,\n verbose_name=_('LABEL_GRADER'),\n on_delete=models.SET_NULL,\n related_name=\"graded_submissions\",\n blank=True, null=True,\n )\n\n # Grading and feedback\n feedback = models.TextField(\n verbose_name=_('LABEL_FEEDBACK'),\n blank=True,\n )\n assistant_feedback = models.TextField(\n verbose_name=_('LABEL_STAFF_FEEDBACK'),\n blank=True,\n )\n status = models.CharField(\n verbose_name=_('LABEL_STATUS'),\n max_length=32,\n choices=STATUS.choices, default=STATUS.INITIALIZED,\n )\n grade = models.IntegerField(\n verbose_name=_('LABEL_GRADE'),\n default=0,\n )\n grading_time = models.DateTimeField(\n verbose_name=_('LABEL_GRADING_TIME'),\n blank=True, null=True,\n )\n late_penalty_applied = PercentField(\n verbose_name=_('LABEL_LATE_PENALTY_APPLIED'),\n blank=True, null=True,\n )\n force_exercise_points = models.BooleanField(\n verbose_name=_('LABEL_FORCE_EXERCISE_POINTS'),\n default=False,\n )\n\n # Points received from assessment, before scaled to grade\n service_points = models.IntegerField(\n verbose_name=_('LABEL_SERVICE_POINTS'),\n default=0,\n )\n service_max_points = models.IntegerField(\n verbose_name=_('LABEL_SERVICE_MAX_POINTS'),\n default=0,\n )\n\n # Additional data\n submission_data = JSONField(\n verbose_name=_('LABEL_SUBMISSION_DATA'),\n blank=True,\n )\n grading_data = JSONField(\n verbose_name=_('LABEL_GRADING_DATA'),\n blank=True,\n )\n meta_data = JSONField(\n verbose_name=_('LABEL_META_DATA'),\n blank=True,\n )\n\n objects = SubmissionManager()\n\n if TYPE_CHECKING:\n id: int\n submitters: models.ManyToManyField[UserProfile, 'Submission']\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMISSION')\n verbose_name_plural = _('MODEL_NAME_SUBMISSION_PLURAL')\n app_label = 'exercise'\n ordering = ['-id']\n\n def __str__(self):\n return str(self.id)\n\n def ordinal_number(self):\n return self.submitters.first().submissions.exclude_errors().filter(\n exercise=self.exercise,\n submission_time__lt=self.submission_time\n ).count() + 1\n\n def is_submitter(self, user):\n return user and user.is_authenticated and \\\n self.submitters.filter(id=user.userprofile.id).exists()\n\n def add_files(self, files):\n \"\"\"\n Adds the given files to this submission as SubmittedFile objects.\n\n @param files: a QueryDict containing files from a POST request\n \"\"\"\n for key in files:\n for uploaded_file in files.getlist(key):\n self.files.create(\n file_object=uploaded_file,\n param_name=key,\n )\n\n def load(\n self,\n request: HttpRequest,\n allow_submit: bool = True,\n feedback_revealed: bool = True,\n ) -> ExercisePage:\n \"\"\"\n Loads the submission page, i.e. the exercise form with the submitted\n answers filled in. 
Not the same as the graded form, which is stored in\n `feedback`.\n\n The `allow_submit` argument determines if the submit button will be\n shown on the page.\n The `feedback_revealed` argument controls whether file inputs\n in the exercise form are disabled.\n \"\"\"\n # Load the exercise page and parse its contents\n submitters = list(self.submitters.all())\n page = self.exercise.as_leaf_class().load(\n request,\n submitters,\n url_name='exercise',\n ordinal=self.ordinal_number(),\n )\n data = pairs_to_dict(self.submission_data) if self.submission_data else None\n page.populate_form(\n field_values=data,\n allow_submit=allow_submit,\n feedback_revealed=feedback_revealed,\n )\n\n return page\n\n def get_post_parameters(\n self,\n request: HttpRequest, url: str\n ) -> Tuple[Dict[str, List[str]], Dict[str, Tuple[str, IO]]]:\n \"\"\"\n Produces submission data for POST as (data_dict, files_dict).\n \"\"\"\n if self.submission_data:\n self._data = pairs_to_dict(self.submission_data)\n else:\n self._data = {}\n\n self._files = {}\n for file in self.files.all().order_by(\"id\"):\n # Requests supports only one file per name in a multipart post.\n self._files[file.param_name] = (\n file.filename,\n open(file.file_object.path, \"rb\") # pylint: disable=consider-using-with\n )\n\n students = list(self.submitters.all())\n if request and self.is_submitter(request.user):\n user = request.user\n else:\n user = students[0].user if students else None\n self.exercise.as_leaf_class().modify_post_parameters(\n self._data, self._files, user, students, request, url)\n return (self._data, self._files)\n\n def clean_post_parameters(self):\n for key in self._files.keys(): # pylint: disable=consider-iterating-dictionary consider-using-dict-items\n self._files[key][1].close()\n del self._files\n del self._data\n\n def approve_penalized_submission(self):\n \"\"\"\n Remove the late penalty and set the status to ready for this submission.\n\n The points of this submission are reset based on the original service points.\n This method is used to approve a late or unofficial submission as\n a normal, graded submission.\n \"\"\"\n self.set_points(self.service_points, self.service_max_points, no_penalties=True)\n self.set_ready(approve_unofficial=True)\n\n def set_points(self, points, max_points, no_penalties=False):\n \"\"\"\n Sets the points and maximum points for this submissions. If the given\n maximum points are different than the ones for the exercise this\n submission is for, the points will be scaled.\n\n The method also checks if the submission is late and if it is, by\n default applies the late_submission_penalty set for the\n exercise.course_module. 
If no_penalties is True, the penalty is not\n applied.\n \"\"\"\n exercise = self.exercise\n\n # Evade bad max points in remote service.\n if max_points == 0 and points > 0:\n max_points = exercise.max_points\n\n # The given points must be between zero and max points\n assert 0 <= points <= max_points\n\n # If service max points is zero, then exercise max points must be zero\n # too because otherwise adjusted_grade would be ambiguous.\n # Disabled: Teacher is always responsible the exercise can be passed.\n #assert not (max_points == 0 and self.exercise.max_points != 0)\n\n self.service_points = points\n self.service_max_points = max_points\n self.late_penalty_applied = None\n\n # Scale the given points to the maximum points for the exercise\n if max_points > 0:\n adjusted_grade = (1.0 * exercise.max_points * points / max_points)\n else:\n adjusted_grade = 0.0\n\n if not no_penalties:\n timing,_ = exercise.get_timing(self.submitters.all(), self.submission_time)\n if timing in (exercise.TIMING.LATE, exercise.TIMING.CLOSED_AFTER):\n self.late_penalty_applied = (\n exercise.course_module.late_submission_penalty if\n exercise.course_module.late_submissions_allowed else 0\n )\n adjusted_grade -= (adjusted_grade * self.late_penalty_applied)\n elif timing == exercise.TIMING.UNOFFICIAL:\n self.status = self.STATUS.UNOFFICIAL\n if self.exercise.no_submissions_left(self.submitters.all()):\n self.status = self.STATUS.UNOFFICIAL\n\n self.grade = round(adjusted_grade)\n\n # Finally check that the grade is in bounds after all the math.\n assert 0 <= self.grade <= self.exercise.max_points\n\n def scale_grade_to(self, percentage):\n percentage = float(percentage)/100\n self.grade = round(max(self.grade*percentage,0))\n self.grade = min(self.grade,self.exercise.max_points)\n\n def set_waiting(self):\n self.status = self.STATUS.WAITING\n self.mark_pending()\n\n def set_ready(self, approve_unofficial=False):\n self.grading_time = timezone.now()\n self.clear_pending()\n if self.status != self.STATUS.UNOFFICIAL or self.force_exercise_points or approve_unofficial:\n self.status = self.STATUS.READY\n\n # Fire set hooks.\n for hook in self.exercise.course_module.course_instance \\\n .course_hooks.filter(hook_type=\"post-grading\"):\n hook.trigger({\n \"submission_id\": self.id,\n \"exercise_id\": self.exercise.id,\n \"course_id\": self.exercise.course_module.course_instance.id,\n \"site\": settings.BASE_URL,\n })\n\n if not PendingSubmission.objects.is_grader_stable():\n # We have a successful grading task in the recovery state. 
It may be a sign that problems\n # have been resolved, so immediately retry the next pending submission, to speed up recovery\n retry_submissions()\n\n def set_rejected(self):\n self.status = self.STATUS.REJECTED\n self.clear_pending()\n\n def set_error(self):\n self.status = self.STATUS.ERROR\n self.clear_pending()\n\n @property\n def is_assessed(self) -> bool:\n \"\"\"Return whether the submission has been manually assessed\"\"\"\n return self.grader is not None\n\n @property\n def is_graded(self):\n return self.status in (self.STATUS.READY, self.STATUS.UNOFFICIAL)\n\n @property\n def lang(self):\n try:\n return self.meta_data.get('lang', None)\n except AttributeError:\n # Handle cases where database includes null or non dictionary json\n return None\n\n @property\n def is_approvable(self):\n \"\"\"Is this submission late or unofficial so that it could be approved?\"\"\"\n return (self.late_penalty_applied is not None\n or self.status == self.STATUS.UNOFFICIAL)\n\n @property\n def lti_launch_id(self):\n try:\n return self.meta_data.get('lti-launch-id')\n except AttributeError:\n return None\n\n def mark_pending(self):\n grading_host = urlparse(self.exercise.service_url).netloc\n if grading_host in settings.SUBMISSION_RETRY_SERVICES:\n pending, created = PendingSubmission.objects.get_or_create(submission=self)\n if not created:\n pending.num_retries = F('num_retries') + 1\n pending.submission_time = timezone.now()\n pending.save()\n\n def clear_pending(self):\n try:\n pending = PendingSubmission.objects.get(submission=self)\n pending.delete()\n except PendingSubmission.DoesNotExist:\n pass\n\n\nclass SubmissionDraft(models.Model):\n \"\"\"\n An incomplete submission that is saved automatically before the user\n submits it. A user can have exactly one draft per exercise instead of\n multiple. The one draft is continuously updated as the user types.\n \"\"\"\n timestamp = models.DateTimeField(\n verbose_name=_('LABEL_TIMESTAMP'),\n auto_now=True,\n )\n exercise = DefaultForeignKey(exercise_models.BaseExercise,\n verbose_name=_('LABEL_EXERCISE'),\n on_delete=models.CASCADE,\n related_name='submission_drafts'\n )\n submitter = models.ForeignKey(UserProfile,\n verbose_name=_('LABEL_SUBMITTER'),\n on_delete=models.CASCADE,\n related_name='submission_drafts'\n )\n submission_data = JSONField(\n verbose_name=_('LABEL_SUBMISSION_DATA'),\n blank=True,\n )\n # This flag is set to False when the student makes an actual submission.\n # This way the draft doesn't have to be deleted and recreated every time\n # the student makes a submission and then starts a new draft.\n active = models.BooleanField(\n verbose_name=_('LABEL_ACTIVE'),\n default=True,\n )\n\n if TYPE_CHECKING:\n objects: models.Manager['SubmissionDraft']\n id: models.AutoField\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMISSION_DRAFT')\n verbose_name_plural = _('MODEL_NAME_SUBMISSION_DRAFT_PLURAL')\n app_label = 'exercise'\n unique_together = ('exercise', 'submitter')\n\n def load(self, request: HttpRequest) -> ExercisePage:\n \"\"\"\n Loads the draft page, i.e. 
the exercise form with the user's\n incomplete answers filled in.\n \"\"\"\n enrollment = self.exercise.course_instance.get_enrollment_for(request.user)\n if enrollment and enrollment.selected_group:\n students = list(enrollment.selected_group.members.all())\n else:\n students = [request.user.userprofile]\n\n page = self.exercise.as_leaf_class().load(\n request,\n students,\n url_name='exercise',\n )\n if self.submission_data:\n data = pairs_to_dict(self.submission_data)\n # Format the timestamp so that it can be used in Javascript's Date constructor\n timestamp = str(int(self.timestamp.timestamp() * 1000))\n page.populate_form(field_values=data, data_values={'draft-timestamp': timestamp}, allow_submit=True)\n\n return page\n\n\ndef build_upload_dir(instance, filename):\n \"\"\"\n Returns the path to a directory where a file should be saved.\n This is called every time a new SubmittedFile model is created.\n\n @param instance: the new SubmittedFile object\n @param filename: the actual name of the submitted file\n @return: a path where the file should be stored, relative to MEDIA_ROOT directory\n \"\"\"\n submission = instance.submission\n exercise = submission.exercise\n submitter_ids = [str(profile.id) for profile in submission.submitters.all().order_by(\"id\")]\n return \"course_instance_{:d}/submissions/exercise_{:d}/users_{}/submission_{:d}/{}\".format(\n exercise.course_instance.id,\n exercise.id,\n \"-\".join(submitter_ids),\n submission.id,\n safe_file_name(filename)\n )\n\n\nclass SubmittedFile(UrlMixin, models.Model):\n \"\"\"\n Represents a file submitted by the student as a solution to an exercise.\n Submitted files are always linked to a certain submission through a\n foreign key relation. The files are stored on the disk while models are\n stored in the database.\n \"\"\"\n submission = models.ForeignKey(Submission,\n verbose_name=_('LABEL_SUBMISSION'),\n on_delete=models.CASCADE,\n related_name=\"files\",\n )\n param_name = models.CharField(\n verbose_name=_('LABEL_PARAM_NAME'),\n max_length=128,\n )\n file_object = models.FileField(\n verbose_name=_('LABEL_FILE_OBJECT'),\n upload_to=build_upload_dir,\n max_length=255,\n )\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMITTED_FILE')\n verbose_name_plural = _('MODEL_NAME_SUBMITTED_FILE_PLURAL')\n app_label = 'exercise'\n\n @property\n def filename(self):\n \"\"\"\n Returns the actual name of the file on the disk.\n \"\"\"\n return os.path.basename(self.file_object.path)\n\n @property\n def exists(self):\n try:\n return bool(self.file_object.size)\n except OSError:\n return False\n\n def get_mime(self):\n return guess_type(self.file_object.path)[0]\n\n def is_passed(self):\n return is_binary(self.file_object.path)\n\n\n ABSOLUTE_URL_NAME = \"submission-file\"\n\n def get_url_kwargs(self):\n return dict( # pylint: disable=use-dict-literal\n file_id=self.id,\n file_name=self.filename,\n **self.submission.get_url_kwargs()\n )\n\n\ndef _delete_file(sender, instance, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Deletes the actual submission files after the submission in database is\n removed.\n \"\"\"\n instance.file_object.delete(save=False)\n\n\npost_delete.connect(_delete_file, SubmittedFile)\n\n\nclass PendingSubmissionManager(models.Manager):\n\n def is_grader_stable(self):\n total_retries = self.aggregate(sum=models.Sum('num_retries'))['sum']\n return not (total_retries and total_retries > settings.GRADER_STABLE_THRESHOLD)\n\n\n def get_exercise_names_if_grader_is_unstable(self, instance):\n 
total_retries_per_exercise = self.values(\n 'submission__exercise__name',\n ).filter(\n submission__exercise__course_module__course_instance=instance.id,\n ).annotate(\n num_retries=models.Sum('num_retries'),\n ).order_by(\n '-num_retries',\n )[:10]\n total_retries = sum(entry['num_retries'] for entry in total_retries_per_exercise)\n # Check if the grader can be considered unstable on this course instance\n if total_retries > settings.GRADER_STABLE_THRESHOLD:\n lang = get_language()\n exercises = \", \".join(\n f\"'{pick_localized(entry['submission__exercise__name'], lang)}'\"\n for entry in total_retries_per_exercise\n )\n return exercises\n return ''\n\n\nclass PendingSubmission(models.Model):\n submission = models.OneToOneField(Submission,\n verbose_name=_('LABEL_SUBMISSION'),\n on_delete=models.CASCADE,\n )\n submission_time = models.DateTimeField(\n verbose_name=_('LABEL_SUBMISSION_TIME'),\n null=True, # to make usage with get_or_create easier\n )\n num_retries = models.PositiveIntegerField(\n verbose_name=_('LABEL_NUMBER_OF_RETRIES'),\n default=0,\n )\n objects = PendingSubmissionManager()\n\n class Meta:\n verbose_name = _('MODEL_NAME_PENDING_SUBMISSION')\n verbose_name_plural = _('MODEL_NAME_PENDING_SUBMISSION_PLURAL')\n",
"path": "exercise/submission_models.py"
}
] | [
{
"content": "import itertools\nimport json\nimport logging\nfrom mimetypes import guess_type\nimport os\nfrom typing import IO, Dict, Iterable, List, Tuple, TYPE_CHECKING, Callable\nfrom urllib.parse import urlparse\n\nfrom binaryornot.check import is_binary\nfrom django.conf import settings\nfrom django.db import models, DatabaseError\nfrom django.db.models import F\nfrom django.db.models.signals import post_delete\nfrom django.http.request import HttpRequest\nfrom django.utils import timezone\nfrom django.utils.translation import get_language, gettext_lazy as _\n\nfrom exercise.protocol.exercise_page import ExercisePage\nfrom authorization.models import JWTAccessible\nfrom authorization.object_permissions import register_jwt_accessible_class\nfrom lib.fields import DefaultForeignKey, JSONField, PercentField\nfrom lib.helpers import (\n get_random_string,\n query_dict_to_list_of_tuples,\n pairs_to_dict,\n safe_file_name,\n Enum,\n)\nfrom lib.localization_syntax import pick_localized\nfrom lib.models import UrlMixin\nfrom lti_tool.utils import has_lti_access_to_course\nfrom userprofile.models import UserProfile\nfrom aplus.celery import retry_submissions\nfrom . import exercise_models\nfrom .exercise_models import LearningObjectProto\n\nlogger = logging.getLogger('aplus.exercise')\n\n\nclass SubmissionQuerySet(models.QuerySet):\n def passes(self) -> \"SubmissionQuerySet\":\n \"\"\"Filter only submissions that pass the exercise\"\"\"\n return self.filter(grade__gte=F(\"exercise__points_to_pass\"))\n\n def exclude_errors(self):\n return self.exclude(status__in=(\n Submission.STATUS.ERROR,\n Submission.STATUS.REJECTED,\n ))\n\n def exclude_unofficial(self):\n return self.exclude(status=Submission.STATUS.UNOFFICIAL)\n\n def annotate_submitter_points(\n self,\n field_name: str = 'total',\n revealed_ids: Iterable[int] = None,\n include_unofficial: bool = False,\n ) -> 'SubmissionQuerySet':\n \"\"\"\n Annotates the total points earned by the submitter in the exercise to\n the queryset. Chain after `values` and before `order_by` to ensure one\n points row per submitter and exercise.\n\n The result will be assigned to a field named by `field_name`.\n\n Provide `revealed_ids`, if you want to hide unrevealed points from the\n queryset.\n\n If `include_unofficial` is `False`, only the submissions with status\n `READY` are included. Otherwise, `READY` and `UNOFFICIAL` are included.\n \"\"\"\n # Building a case expression for calculating the total points. 
There\n # are 4 cases:\n # 1) If revealed_ids was provided, and the exercise id is not in it,\n # return 0.\n # 2) If a submission has the force_exercise_points flag set to True,\n # return that submission's points.\n # 3) If the grading_mode field of the exercise is set to LAST, return\n # the points of the latest submission.\n # 4) In any other case, return the points of the best submission.\n # If none of the submissions are in an expected status (READY or\n # UNOFFICIAL, depending on the include_unofficial parameter, return 0).\n force_zero = False\n cases = []\n if include_unofficial:\n statuses = (Submission.STATUS.READY, Submission.STATUS.UNOFFICIAL)\n else:\n statuses = (Submission.STATUS.READY,)\n if revealed_ids is not None:\n # revealed_ids may be an empty set.\n if revealed_ids:\n # This When clause crashes if the revealed_ids set is empty.\n cases.append(\n models.When(\n ~models.Q(exercise__in=revealed_ids),\n then=0,\n )\n )\n else:\n # No exercise is revealed, thus always return grade zero.\n force_zero = True\n cases.append(\n models.When(\n forced_points__isnull=False,\n then=models.F('forced_points'),\n )\n )\n cases.append(\n models.When(\n exercise__grading_mode=exercise_models.BaseExercise.GRADING_MODE.LAST,\n then=models.Subquery(\n self.filter(\n exercise=models.OuterRef('exercise_id'),\n submitters=models.OuterRef('submitters__id'),\n status__in=statuses,\n )\n .order_by('-submission_time')\n .values('grade')[:1]\n ),\n )\n )\n return (\n self.alias(\n forced_points=models.Max('grade', filter=models.Q(force_exercise_points=True)),\n )\n .annotate(**{\n # Coalesce ensures that 0 is returned instead of None, if none\n # of the submissions are in an expected status.\n field_name: models.functions.Coalesce(\n models.Case(\n *cases,\n default=models.Max(\n 'grade',\n filter=models.Q(\n status__in=statuses,\n ),\n ),\n ),\n 0,\n ) if not force_zero else models.Value(0),\n })\n )\n\n def annotate_best_submitter_points(\n self,\n field_name: str = 'total',\n revealed_ids: Iterable[int] = None,\n include_unofficial: bool = False,\n ) -> 'SubmissionQuerySet':\n \"\"\"\n Annotates the total points earned by the submitter in the exercise to\n the queryset. Chain after `values` and before `order_by` to ensure one\n points row per submitter and exercise.\n\n The result will be assigned to a field named by `field_name`.\n\n Provide `revealed_ids`, if you want to hide unrevealed points from the\n queryset.\n\n If `include_unofficial` is `False`, only the submissions with status\n `READY` are included. Otherwise, `READY` and `UNOFFICIAL` are included.\n\n This method performs better than `annotate_submitter_points()`, but\n this method ignores the exercise grading mode LAST. This method assumes\n that exercise grading mode BEST is always used. This is a hacky and\n temporary workaround for database performance issues.\n \"\"\"\n # Building a case expression for calculating the total points. 
There\n # are 3 cases:\n # 1) If revealed_ids was provided, and the exercise id is not in it,\n # return 0.\n # 2) If a submission has the force_exercise_points flag set to True,\n # return that submission's points.\n # 3) In any other case, return the points of the best submission.\n # If none of the submissions are in an expected status (READY or\n # UNOFFICIAL, depending on the include_unofficial parameter, return 0).\n force_zero = False\n cases = []\n if include_unofficial:\n statuses = (Submission.STATUS.READY, Submission.STATUS.UNOFFICIAL)\n else:\n statuses = (Submission.STATUS.READY,)\n if revealed_ids is not None:\n # revealed_ids may be an empty set.\n if revealed_ids:\n # This When clause crashes if the revealed_ids set is empty.\n cases.append(\n models.When(\n ~models.Q(exercise__in=revealed_ids),\n then=0,\n )\n )\n else:\n # No exercise is revealed, thus always return grade zero.\n force_zero = True\n cases.append(\n models.When(\n forced_points__isnull=False,\n then=models.F('forced_points'),\n )\n )\n return (\n self.alias(\n forced_points=models.Max('grade', filter=models.Q(force_exercise_points=True)),\n )\n .annotate(**{\n # Coalesce ensures that 0 is returned instead of None, if none\n # of the submissions are in an expected status.\n field_name: models.functions.Coalesce(\n models.Case(\n *cases,\n default=models.Max(\n 'grade',\n filter=models.Q(\n status__in=statuses,\n ),\n ),\n ),\n 0,\n ) if not force_zero else models.Value(0),\n })\n )\n\n def defer_text_fields(self):\n return self.defer(\n 'feedback',\n 'assistant_feedback',\n 'grading_data',\n 'submission_data',\n 'meta_data',\n )\n\n\nclass SubmissionManager(JWTAccessible[\"Submission\"], models.Manager):\n _queryset_class = SubmissionQuerySet\n\n # Hints the correct return type for .filter(...)\n filter: Callable[..., SubmissionQuerySet]\n\n def get_queryset(self):\n return super().get_queryset()\\\n .prefetch_related('submitters')\n\n def create_from_post(self, exercise, submitters, request):\n\n submission_data_list = [\n (key, value) for (key, value) in query_dict_to_list_of_tuples(request.POST)\n if key != '__aplus__'\n ]\n try:\n meta_data_dict = json.loads(request.POST.get('__aplus__', '{}'))\n except json.JSONDecodeError as exc:\n raise ValueError(\"The content of the field __aplus__ is not valid json\") from exc\n if 'lang' not in meta_data_dict:\n meta_data_dict['lang'] = get_language()\n\n try:\n if ('lti-launch-id' in request.session\n and has_lti_access_to_course(request, None, exercise.course_instance)):\n meta_data_dict['lti-launch-id'] = request.session.get('lti-launch-id')\n if 'lti1p3-session-id' in request.COOKIES:\n meta_data_dict['lti-session-id'] = request.COOKIES.get('lti1p3-session-id')\n\n new_submission = Submission.objects.create(\n exercise=exercise,\n submission_data=submission_data_list,\n meta_data=meta_data_dict,\n )\n new_submission.submitters.set(submitters)\n except DatabaseError as error:\n logger.exception(\"Failed to create submission: %s %s\",\n request.user.username, exercise);\n raise DatabaseError from error\n try:\n new_submission.add_files(request.FILES)\n except DatabaseError as error:\n logger.exception(\"Failed to save submitted files: %s %s\",\n request.user.username, exercise);\n new_submission.delete()\n raise DatabaseError from error\n return new_submission\n\n def exclude_errors(self):\n return self.exclude(status__in=(\n Submission.STATUS.ERROR,\n Submission.STATUS.REJECTED,\n ))\n\n def exclude_unofficial(self):\n return 
self.exclude(status=Submission.STATUS.UNOFFICIAL)\n\n def get_combined_enrollment_submission_data(self, user):\n \"\"\"Retrieve the user's submissions to enrollment exercises and combine\n their submission data into a single dictionary.\n The most recent value (based on submission time) is used for data keys\n that are present in multiple submissions.\n\n The values in the returned dictionary are lists since some form inputs\n accept multiple values (e.g., checkboxes). (The original submission_data\n is stored as a list of key-value pairs, but multiple pairs may repeat\n the same key.)\n \"\"\"\n submissions = Submission.objects.filter(\n exercise__status__in=(\n exercise_models.LearningObject.STATUS.ENROLLMENT,\n exercise_models.LearningObject.STATUS.ENROLLMENT_EXTERNAL\n ),\n submitters__user__id=user.id\n ).order_by('submission_time').only('submission_data')[:10]\n # Retrieve the ten latest submissions since older submissions likely\n # do not have any useful data.\n enrollment_data = {}\n # pylint: disable-next=unnecessary-lambda-assignment\n keyfunc = lambda t: t[0] # the key in a key-value pair\n for sbms in submissions:\n # submission_data should be a list of key-value pairs, but\n # nothing guarantees it in the database level.\n # Checkbox inputs may produce multiple values for the same key, thus\n # the list of pairs may use the same key in different pairs.\n # For each submission, group the submission data by the keys so that\n # multiple values can be preserved for a key when all submissions\n # are combined.\n single_sbms_grouped_data = {} # dict maps keys to the list of one or more values\n try:\n for key, pairs in itertools.groupby(\n sorted(sbms.submission_data, key=keyfunc),\n key=keyfunc):\n single_sbms_grouped_data[key] = [val for k, val in pairs]\n\n # Update the combined enrollment submission data.\n # Later submissions overwrite previous values for the same keys.\n # The keys are combined from many submissions, but the value list\n # for one key always originates from one submission.\n enrollment_data.update(single_sbms_grouped_data)\n except Exception:\n # submission_data was not a list of pairs\n pass\n return enrollment_data\n\nclass SubmissionProto(UrlMixin):\n ABSOLUTE_URL_NAME = \"submission\"\n id: int\n exercise: LearningObjectProto\n\n def get_url_kwargs(self):\n return {\"submission_id\": self.id, **self.exercise.get_url_kwargs()}\n\n def get_inspect_url(self):\n return self.get_url(\"submission-inspect\")\n\n\n@register_jwt_accessible_class(\"submission\")\nclass Submission(SubmissionProto, models.Model):\n \"\"\"\n A submission to some course exercise from one or more submitters.\n \"\"\"\n STATUS = Enum([\n ('INITIALIZED', 'initialized', _('STATUS_INITIALIZED')),\n ('WAITING', 'waiting', _('STATUS_WAITING')),\n ('READY', 'ready', _('STATUS_READY')), # graded normally\n ('ERROR', 'error', _('STATUS_ERROR')),\n ('REJECTED', 'rejected', _('STATUS_REJECTED')), # missing fields etc\n ('UNOFFICIAL', 'unofficial', _('STATUS_UNOFFICIAL')),\n # unofficial: graded after the deadline or after exceeding the submission limit\n ])\n submission_time = models.DateTimeField(\n verbose_name=_('LABEL_SUBMISSION_TIME'),\n auto_now_add=True,\n )\n hash = models.CharField(\n verbose_name=_('LABEL_HASH'),\n max_length=32,\n default=get_random_string,\n )\n\n # Relations\n exercise: exercise_models.BaseExercise = DefaultForeignKey(exercise_models.BaseExercise, # type: ignore\n verbose_name=_('LABEL_EXERCISE'),\n on_delete=models.CASCADE,\n related_name=\"submissions\")\n 
submitters = models.ManyToManyField(UserProfile,\n verbose_name=_('LABEL_SUBMITTERS'),\n related_name=\"submissions\")\n grader = models.ForeignKey(UserProfile,\n verbose_name=_('LABEL_GRADER'),\n on_delete=models.SET_NULL,\n related_name=\"graded_submissions\",\n blank=True, null=True,\n )\n\n # Grading and feedback\n feedback = models.TextField(\n verbose_name=_('LABEL_FEEDBACK'),\n blank=True,\n )\n assistant_feedback = models.TextField(\n verbose_name=_('LABEL_STAFF_FEEDBACK'),\n blank=True,\n )\n status = models.CharField(\n verbose_name=_('LABEL_STATUS'),\n max_length=32,\n choices=STATUS.choices, default=STATUS.INITIALIZED,\n )\n grade = models.IntegerField(\n verbose_name=_('LABEL_GRADE'),\n default=0,\n )\n grading_time = models.DateTimeField(\n verbose_name=_('LABEL_GRADING_TIME'),\n blank=True, null=True,\n )\n late_penalty_applied = PercentField(\n verbose_name=_('LABEL_LATE_PENALTY_APPLIED'),\n blank=True, null=True,\n )\n force_exercise_points = models.BooleanField(\n verbose_name=_('LABEL_FORCE_EXERCISE_POINTS'),\n default=False,\n )\n\n # Points received from assessment, before scaled to grade\n service_points = models.IntegerField(\n verbose_name=_('LABEL_SERVICE_POINTS'),\n default=0,\n )\n service_max_points = models.IntegerField(\n verbose_name=_('LABEL_SERVICE_MAX_POINTS'),\n default=0,\n )\n\n # Additional data\n submission_data = JSONField(\n verbose_name=_('LABEL_SUBMISSION_DATA'),\n blank=True,\n )\n grading_data = JSONField(\n verbose_name=_('LABEL_GRADING_DATA'),\n blank=True,\n )\n meta_data = JSONField(\n verbose_name=_('LABEL_META_DATA'),\n blank=True,\n )\n\n objects = SubmissionManager()\n\n if TYPE_CHECKING:\n id: int\n submitters: models.ManyToManyField[UserProfile, 'Submission']\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMISSION')\n verbose_name_plural = _('MODEL_NAME_SUBMISSION_PLURAL')\n app_label = 'exercise'\n ordering = ['-id']\n\n def __str__(self):\n return str(self.id)\n\n def ordinal_number(self):\n return self.submitters.first().submissions.exclude_errors().filter(\n exercise=self.exercise,\n submission_time__lt=self.submission_time\n ).count() + 1\n\n def is_submitter(self, user):\n return user and user.is_authenticated and \\\n self.submitters.filter(id=user.userprofile.id).exists()\n\n def add_files(self, files):\n \"\"\"\n Adds the given files to this submission as SubmittedFile objects.\n\n @param files: a QueryDict containing files from a POST request\n \"\"\"\n for key in files:\n for uploaded_file in files.getlist(key):\n self.files.create(\n file_object=uploaded_file,\n param_name=key,\n )\n\n def load(\n self,\n request: HttpRequest,\n allow_submit: bool = True,\n feedback_revealed: bool = True,\n ) -> ExercisePage:\n \"\"\"\n Loads the submission page, i.e. the exercise form with the submitted\n answers filled in. 
Not the same as the graded form, which is stored in\n `feedback`.\n\n The `allow_submit` argument determines if the submit button will be\n shown on the page.\n The `feedback_revealed` argument controls whether file inputs\n in the exercise form are disabled.\n \"\"\"\n # Load the exercise page and parse its contents\n submitters = list(self.submitters.all())\n page = self.exercise.as_leaf_class().load(\n request,\n submitters,\n url_name='exercise',\n ordinal=self.ordinal_number(),\n )\n data = pairs_to_dict(self.submission_data) if self.submission_data else None\n page.populate_form(\n field_values=data,\n allow_submit=allow_submit,\n feedback_revealed=feedback_revealed,\n )\n\n return page\n\n def get_post_parameters(\n self,\n request: HttpRequest, url: str\n ) -> Tuple[Dict[str, List[str]], Dict[str, Tuple[str, IO]]]:\n \"\"\"\n Produces submission data for POST as (data_dict, files_dict).\n \"\"\"\n if self.submission_data:\n self._data = pairs_to_dict(self.submission_data)\n else:\n self._data = {}\n\n self._files = {}\n for file in self.files.all().order_by(\"id\"):\n # Requests supports only one file per name in a multipart post.\n self._files[file.param_name] = (\n file.filename,\n open(file.file_object.path, \"rb\") # pylint: disable=consider-using-with\n )\n\n students = list(self.submitters.all())\n if request and self.is_submitter(request.user):\n user = request.user\n else:\n user = students[0].user if students else None\n self.exercise.as_leaf_class().modify_post_parameters(\n self._data, self._files, user, students, request, url)\n return (self._data, self._files)\n\n def clean_post_parameters(self):\n for key in self._files.keys(): # pylint: disable=consider-iterating-dictionary consider-using-dict-items\n self._files[key][1].close()\n del self._files\n del self._data\n\n def approve_penalized_submission(self):\n \"\"\"\n Remove the late penalty and set the status to ready for this submission.\n\n The points of this submission are reset based on the original service points.\n This method is used to approve a late or unofficial submission as\n a normal, graded submission.\n \"\"\"\n self.set_points(self.service_points, self.service_max_points, no_penalties=True)\n self.set_ready(approve_unofficial=True)\n\n def set_points(self, points, max_points, no_penalties=False):\n \"\"\"\n Sets the points and maximum points for this submissions. If the given\n maximum points are different than the ones for the exercise this\n submission is for, the points will be scaled.\n\n The method also checks if the submission is late and if it is, by\n default applies the late_submission_penalty set for the\n exercise.course_module. 
If no_penalties is True, the penalty is not\n applied.\n \"\"\"\n exercise = self.exercise\n\n # Evade bad max points in remote service.\n if max_points == 0 and points > 0:\n max_points = exercise.max_points\n\n # The given points must be between zero and max points\n assert 0 <= points <= max_points\n\n # If service max points is zero, then exercise max points must be zero\n # too because otherwise adjusted_grade would be ambiguous.\n # Disabled: Teacher is always responsible the exercise can be passed.\n #assert not (max_points == 0 and self.exercise.max_points != 0)\n\n self.service_points = points\n self.service_max_points = max_points\n self.late_penalty_applied = None\n\n # Scale the given points to the maximum points for the exercise\n if max_points > 0:\n adjusted_grade = (1.0 * exercise.max_points * points / max_points)\n else:\n adjusted_grade = 0.0\n\n if not no_penalties:\n timing,_ = exercise.get_timing(self.submitters.all(), self.submission_time)\n if timing in (exercise.TIMING.LATE, exercise.TIMING.CLOSED_AFTER):\n self.late_penalty_applied = (\n exercise.course_module.late_submission_penalty if\n exercise.course_module.late_submissions_allowed else 0\n )\n adjusted_grade -= (adjusted_grade * self.late_penalty_applied)\n elif timing == exercise.TIMING.UNOFFICIAL:\n self.status = self.STATUS.UNOFFICIAL\n if self.exercise.no_submissions_left(self.submitters.all()):\n self.status = self.STATUS.UNOFFICIAL\n\n self.grade = round(adjusted_grade)\n\n # Finally check that the grade is in bounds after all the math.\n assert 0 <= self.grade <= self.exercise.max_points\n\n def scale_grade_to(self, percentage):\n percentage = float(percentage)/100\n self.grade = round(max(self.grade*percentage,0))\n self.grade = min(self.grade,self.exercise.max_points)\n\n def set_waiting(self):\n self.status = self.STATUS.WAITING\n self.mark_pending()\n\n def set_ready(self, approve_unofficial=False):\n self.grading_time = timezone.now()\n self.clear_pending()\n if self.status != self.STATUS.UNOFFICIAL or self.force_exercise_points or approve_unofficial:\n self.status = self.STATUS.READY\n\n # Fire set hooks.\n for hook in self.exercise.course_module.course_instance \\\n .course_hooks.filter(hook_type=\"post-grading\"):\n hook.trigger({\n \"submission_id\": self.id,\n \"exercise_id\": self.exercise.id,\n \"course_id\": self.exercise.course_module.course_instance.id,\n \"site\": settings.BASE_URL,\n })\n\n if not PendingSubmission.objects.is_grader_stable():\n # We have a successful grading task in the recovery state. 
It may be a sign that problems\n # have been resolved, so immediately retry the next pending submission, to speed up recovery\n retry_submissions()\n\n def set_rejected(self):\n self.status = self.STATUS.REJECTED\n self.clear_pending()\n\n def set_error(self):\n self.status = self.STATUS.ERROR\n self.clear_pending()\n\n @property\n def is_assessed(self) -> bool:\n \"\"\"Return whether the submission has been manually assessed\"\"\"\n return self.grader is not None\n\n @property\n def is_graded(self):\n return self.status in (self.STATUS.READY, self.STATUS.UNOFFICIAL)\n\n @property\n def lang(self):\n try:\n return self.meta_data.get('lang', None)\n except AttributeError:\n # Handle cases where database includes null or non dictionary json\n return None\n\n @property\n def is_approvable(self):\n \"\"\"Is this submission late or unofficial so that it could be approved?\"\"\"\n return (self.late_penalty_applied is not None\n or self.status == self.STATUS.UNOFFICIAL)\n\n @property\n def lti_launch_id(self):\n try:\n return self.meta_data.get('lti-launch-id')\n except AttributeError:\n return None\n\n def mark_pending(self):\n grading_host = urlparse(self.exercise.service_url).netloc\n if grading_host in settings.SUBMISSION_RETRY_SERVICES:\n pending, created = PendingSubmission.objects.get_or_create(submission=self)\n if not created:\n pending.num_retries = F('num_retries') + 1\n pending.submission_time = timezone.now()\n pending.save()\n\n def clear_pending(self):\n try:\n pending = PendingSubmission.objects.get(submission=self)\n pending.delete()\n except PendingSubmission.DoesNotExist:\n pass\n\n\nclass SubmissionDraft(models.Model):\n \"\"\"\n An incomplete submission that is saved automatically before the user\n submits it. A user can have exactly one draft per exercise instead of\n multiple. The one draft is continuously updated as the user types.\n \"\"\"\n timestamp = models.DateTimeField(\n verbose_name=_('LABEL_TIMESTAMP'),\n auto_now=True,\n )\n exercise = DefaultForeignKey(exercise_models.BaseExercise,\n verbose_name=_('LABEL_EXERCISE'),\n on_delete=models.CASCADE,\n related_name='submission_drafts'\n )\n submitter = models.ForeignKey(UserProfile,\n verbose_name=_('LABEL_SUBMITTER'),\n on_delete=models.CASCADE,\n related_name='submission_drafts'\n )\n submission_data = JSONField(\n verbose_name=_('LABEL_SUBMISSION_DATA'),\n blank=True,\n )\n # This flag is set to False when the student makes an actual submission.\n # This way the draft doesn't have to be deleted and recreated every time\n # the student makes a submission and then starts a new draft.\n active = models.BooleanField(\n verbose_name=_('LABEL_ACTIVE'),\n default=True,\n )\n\n if TYPE_CHECKING:\n objects: models.Manager['SubmissionDraft']\n id: models.AutoField\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMISSION_DRAFT')\n verbose_name_plural = _('MODEL_NAME_SUBMISSION_DRAFT_PLURAL')\n app_label = 'exercise'\n unique_together = ('exercise', 'submitter')\n\n def load(self, request: HttpRequest) -> ExercisePage:\n \"\"\"\n Loads the draft page, i.e. 
the exercise form with the user's\n incomplete answers filled in.\n \"\"\"\n enrollment = self.exercise.course_instance.get_enrollment_for(request.user)\n if enrollment and enrollment.selected_group:\n students = list(enrollment.selected_group.members.all())\n else:\n students = [request.user.userprofile]\n\n page = self.exercise.as_leaf_class().load(\n request,\n students,\n url_name='exercise',\n )\n if self.submission_data:\n data = pairs_to_dict(self.submission_data)\n # Format the timestamp so that it can be used in Javascript's Date constructor\n timestamp = str(int(self.timestamp.timestamp() * 1000))\n page.populate_form(field_values=data, data_values={'draft-timestamp': timestamp}, allow_submit=True)\n\n return page\n\n\ndef build_upload_dir(instance, filename):\n \"\"\"\n Returns the path to a directory where a file should be saved.\n This is called every time a new SubmittedFile model is created.\n\n @param instance: the new SubmittedFile object\n @param filename: the actual name of the submitted file\n @return: a path where the file should be stored, relative to MEDIA_ROOT directory\n \"\"\"\n submission = instance.submission\n exercise = submission.exercise\n submitter_ids = [str(profile.id) for profile in submission.submitters.all().order_by(\"id\")]\n return \"course_instance_{:d}/submissions/exercise_{:d}/users_{}/submission_{:d}/{}\".format(\n exercise.course_instance.id,\n exercise.id,\n \"-\".join(submitter_ids),\n submission.id,\n safe_file_name(filename)\n )\n\n\nclass SubmittedFile(UrlMixin, models.Model):\n \"\"\"\n Represents a file submitted by the student as a solution to an exercise.\n Submitted files are always linked to a certain submission through a\n foreign key relation. The files are stored on the disk while models are\n stored in the database.\n \"\"\"\n submission = models.ForeignKey(Submission,\n verbose_name=_('LABEL_SUBMISSION'),\n on_delete=models.CASCADE,\n related_name=\"files\",\n )\n param_name = models.CharField(\n verbose_name=_('LABEL_PARAM_NAME'),\n max_length=128,\n )\n file_object = models.FileField(\n verbose_name=_('LABEL_FILE_OBJECT'),\n upload_to=build_upload_dir,\n max_length=255,\n )\n\n class Meta:\n verbose_name = _('MODEL_NAME_SUBMITTED_FILE')\n verbose_name_plural = _('MODEL_NAME_SUBMITTED_FILE_PLURAL')\n app_label = 'exercise'\n\n @property\n def filename(self):\n \"\"\"\n Returns the actual name of the file on the disk.\n \"\"\"\n return os.path.basename(self.file_object.path)\n\n @property\n def exists(self):\n try:\n return bool(self.file_object.size)\n except OSError:\n return False\n\n def get_mime(self):\n return guess_type(self.file_object.path)[0]\n\n def is_passed(self):\n if self.file_object.path.endswith(\".pdf\"):\n # PDF files are sometimes incorrectly classified as non-binary by the 'binaryornot' library\n return True\n return is_binary(self.file_object.path)\n\n\n ABSOLUTE_URL_NAME = \"submission-file\"\n\n def get_url_kwargs(self):\n return dict( # pylint: disable=use-dict-literal\n file_id=self.id,\n file_name=self.filename,\n **self.submission.get_url_kwargs()\n )\n\n\ndef _delete_file(sender, instance, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Deletes the actual submission files after the submission in database is\n removed.\n \"\"\"\n instance.file_object.delete(save=False)\n\n\npost_delete.connect(_delete_file, SubmittedFile)\n\n\nclass PendingSubmissionManager(models.Manager):\n\n def is_grader_stable(self):\n total_retries = self.aggregate(sum=models.Sum('num_retries'))['sum']\n return not 
(total_retries and total_retries > settings.GRADER_STABLE_THRESHOLD)\n\n\n def get_exercise_names_if_grader_is_unstable(self, instance):\n total_retries_per_exercise = self.values(\n 'submission__exercise__name',\n ).filter(\n submission__exercise__course_module__course_instance=instance.id,\n ).annotate(\n num_retries=models.Sum('num_retries'),\n ).order_by(\n '-num_retries',\n )[:10]\n total_retries = sum(entry['num_retries'] for entry in total_retries_per_exercise)\n # Check if the grader can be considered unstable on this course instance\n if total_retries > settings.GRADER_STABLE_THRESHOLD:\n lang = get_language()\n exercises = \", \".join(\n f\"'{pick_localized(entry['submission__exercise__name'], lang)}'\"\n for entry in total_retries_per_exercise\n )\n return exercises\n return ''\n\n\nclass PendingSubmission(models.Model):\n submission = models.OneToOneField(Submission,\n verbose_name=_('LABEL_SUBMISSION'),\n on_delete=models.CASCADE,\n )\n submission_time = models.DateTimeField(\n verbose_name=_('LABEL_SUBMISSION_TIME'),\n null=True, # to make usage with get_or_create easier\n )\n num_retries = models.PositiveIntegerField(\n verbose_name=_('LABEL_NUMBER_OF_RETRIES'),\n default=0,\n )\n objects = PendingSubmissionManager()\n\n class Meta:\n verbose_name = _('MODEL_NAME_PENDING_SUBMISSION')\n verbose_name_plural = _('MODEL_NAME_PENDING_SUBMISSION_PLURAL')\n",
"path": "exercise/submission_models.py"
}
] | diff --git a/exercise/submission_models.py b/exercise/submission_models.py
index 2755d0554..ba6402a8c 100644
--- a/exercise/submission_models.py
+++ b/exercise/submission_models.py
@@ -832,6 +832,9 @@ def get_mime(self):
return guess_type(self.file_object.path)[0]
def is_passed(self):
+ if self.file_object.path.endswith(".pdf"):
+ # PDF files are sometimes incorrectly classified as non-binary by the 'binaryornot' library
+ return True
return is_binary(self.file_object.path)
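
For readers of the diff above, a minimal standalone sketch of the added check, using the same `binaryornot.check.is_binary` helper the model file already imports. The function name and the plain `path` argument are illustrative stand-ins for the model method and its `self.file_object.path`, not code from the project:

```python
from binaryornot.check import is_binary

def looks_binary(path: str) -> bool:
    # 'binaryornot' sometimes misclassifies PDF files as non-binary, so
    # short-circuit on the extension before inspecting the file contents.
    # The path must point to an existing file, since is_binary() reads it.
    if path.endswith(".pdf"):
        return True
    return is_binary(path)
```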
|
ivy-llc__ivy-14663 | Fix generating_index_arrays.test_numpy_diag_indices
| Backend | Status |
|---|---|
|paddle|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a>
|tensorflow|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a>
|torch|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a>
|numpy|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a>
|jax|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a>
| [
{
"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n outputs_to_numpy_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef indices(dimensions, dtype=int, sparse=False):\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = tuple()\n else:\n res = ivy.empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :])\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape, order=\"C\"):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n\n\n@outputs_to_numpy_arrays\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n, k=0, m=None):\n return ivy.tril_indices(n, m, k)\n",
"path": "ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py"
}
] | [
{
"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n outputs_to_numpy_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef indices(dimensions, dtype=int, sparse=False):\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = tuple()\n else:\n res = ivy.empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :])\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape, order=\"C\"):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n)\n res = ivy.array((idx,) * ndim)\n res = tuple(res.astype(\"int64\"))\n return res\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n, k=0, m=None):\n return ivy.tril_indices(n, m, k)\n",
"path": "ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py"
}
] | diff --git a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py
index c0052ea95f611..5faf97413fe2f 100644
--- a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py
+++ b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py
@@ -30,10 +30,12 @@ def unravel_index(indices, shape, order="C"):
return tuple(ret)
-@outputs_to_numpy_arrays
+@to_ivy_arrays_and_back
def diag_indices(n, ndim=2):
- idx = ivy.arange(n, dtype=int)
- return (idx,) * ndim
+ idx = ivy.arange(n)
+ res = ivy.array((idx,) * ndim)
+ res = tuple(res.astype("int64"))
+ return res
@to_ivy_arrays_and_back
|
conda__conda-build-1088 | problems with GIT_DESCRIBE_NUMBER
Hello, my recipe looks like this:
```
package:
name: pkg
version: {{ GIT_DESCRIBE_TAG }}
source:
git_url: .
git_rev: conda_pkg
build:
number: {{ GIT_DESCRIBE_NUMBER }}
...
```
and it is located in a git repo:
```
user@machine:pkg_git $ git status
On branch conda_pkg
Your branch is up-to-date with 'origin/conda_pkg'.
nothing to commit, working directory clean
```
If I perform a `conda build .`, this error is produced:
```
Traceback (most recent call last):
File "/home/user/anaconda3/bin/conda-build", line 5, in <module>
sys.exit(main())
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/main_build.py", line 144, in main
args_func(args, p)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/main_build.py", line 389, in args_func
args.func(args, p)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/main_build.py", line 287, in execute
verbose=False, dirty=args.dirty)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/render.py", line 135, in render_recipe
verbose=verbose, dirty=dirty)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/render.py", line 87, in parse_or_try_download
metadata.parse_again(permit_undefined_jinja=False)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/metadata.py", line 377, in parse_again
self.meta = parse(self._get_contents(permit_undefined_jinja), path=self.meta_path)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/metadata.py", line 666, in _get_contents
env.globals.update(context_processor(self, path))
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/jinja_context.py", line 130, in context_processor
ctx = get_environ(m=initial_metadata)
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/environ.py", line 174, in get_dict
d.update(meta_vars(m))
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/environ.py", line 276, in meta_vars
d['PKG_BUILDNUM'] = str(meta.build_number())
File "/home/user/anaconda3/lib/python3.5/site-packages/conda_build/metadata.py", line 454, in build_number
return int(self.get_value('build/number', 0))
ValueError: invalid literal for int() with base 10: ''
```
If I substitute:
```
build:
number: {{ GIT_DESCRIBE_NUMBER }}
```
with
```
build:
number: 0
```
it works, and the created package is something like `pkg-v0.2.2-76_g869cb67.tar.bz2`.
Stepping into `/home/user/anaconda3/lib/python3.5/site-packages/conda_build/metadata.py:454`, it seems that `GIT_DESCRIBE_NUMBER` is `None`...
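
A minimal sketch of what the traceback reduces to, assuming that the still-undefined `GIT_DESCRIBE_NUMBER` renders to an empty string on the first parsing pass (the `permit_undefined_jinja` docstring in `metadata.py` says undefined jinja variables evaluate to an empty string there):

```python
# build_number() is essentially int(get_value('build/number', 0)); when the
# template renders before the git info exists, the value ends up as ''.
rendered_value = ''

try:
    int(rendered_value)             # what build_number() does
except ValueError as exc:
    print(exc)                      # invalid literal for int() with base 10: ''

print(int(rendered_value or 0))     # a defensive fallback would yield 0 instead
```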
Do you have any idea why `GIT_DESCRIBE_NUMBER` is not working?
Thanks
---
```
$ conda build --version
conda-build 1.21.3
$ conda --version
conda 4.1.6
```
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import MatchSpec\nfrom conda.cli.common import specs_from_url\n\nfrom conda_build import exceptions\nfrom conda_build.features import feature_list\n\ntry:\n import yaml\n\n # try to import C loader\n try:\n from yaml import CBaseLoader as BaseLoader\n except ImportError:\n from yaml import BaseLoader\nexcept ImportError:\n sys.exit('Error: could not import yaml (required to read meta.yaml '\n 'files of conda recipes)')\n\nfrom conda_build.config import config\nfrom conda_build.utils import comma_join\n\non_win = (sys.platform == 'win32')\n\n\ndef ns_cfg():\n # Remember to update the docs of any of this changes\n plat = cc.subdir\n py = config.CONDA_PY\n np = config.CONDA_NPY\n pl = config.CONDA_PERL\n lua = config.CONDA_LUA\n assert isinstance(py, int), py\n d = dict(\n linux=plat.startswith('linux-'),\n linux32=bool(plat == 'linux-32'),\n linux64=bool(plat == 'linux-64'),\n arm=plat.startswith('linux-arm'),\n osx=plat.startswith('osx-'),\n unix=plat.startswith(('linux-', 'osx-')),\n win=plat.startswith('win-'),\n win32=bool(plat == 'win-32'),\n win64=bool(plat == 'win-64'),\n x86=plat.endswith(('-32', '-64')),\n x86_64=plat.endswith('-64'),\n pl=pl,\n py=py,\n lua=lua,\n luajit=bool(lua[0] == \"2\"),\n py3k=bool(30 <= py < 40),\n py2k=bool(20 <= py < 30),\n py26=bool(py == 26),\n py27=bool(py == 27),\n py33=bool(py == 33),\n py34=bool(py == 34),\n py35=bool(py == 35),\n np=np,\n os=os,\n environ=os.environ,\n )\n for machine in cc.non_x86_linux_machines:\n d[machine] = bool(plat == 'linux-%s' % machine)\n\n for feature, value in feature_list:\n d[feature] = value\n d.update(os.environ)\n return d\n\n\n# Selectors must be either:\n# - at end of the line\n# - embedded (anywhere) within a comment\n#\n# Notes:\n# - [([^\\[\\]]+)\\] means \"find a pair of brackets containing any\n# NON-bracket chars, and capture the contents\"\n# - (?(2).*)$ means \"allow trailing characters iff group 2 (#.*) was found.\"\nsel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[([^\\[\\]]+)\\](?(2).*)$')\n\n\ndef select_lines(data, namespace):\n lines = []\n for i, line in enumerate(data.splitlines()):\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n cond = m.group(3)\n try:\n if eval(cond, namespace, {}):\n lines.append(m.group(1))\n except:\n sys.exit('''\\\nError: Invalid selector in meta.yaml line %d:\n%s\n''' % (i + 1, line))\n sys.exit(1)\n continue\n lines.append(line)\n return '\\n'.join(lines) + '\\n'\n\n\n@memoized\ndef yamlize(data):\n try:\n return yaml.load(data, Loader=BaseLoader)\n except yaml.error.YAMLError as e:\n if '{{' in data:\n try:\n import jinja2\n jinja2 # Avoid pyflakes failure: 'jinja2' imported but unused\n except ImportError:\n raise exceptions.UnableToParseMissingJinja2(original=e)\n raise exceptions.UnableToParse(original=e)\n\n\nallowed_license_families = set(\"\"\"\nAGPL\nApache\nBSD\nGPL2\nGPL3\nLGPL\nMIT\nOther\nPSF\nProprietary\nPublic-Domain\n\"\"\".split())\n\n\ndef ensure_valid_license_family(meta):\n try:\n license_family = meta['about']['license_family']\n except KeyError:\n return\n if license_family not in allowed_license_families:\n raise RuntimeError(exceptions.indent(\n 
\"about/license_family '%s' not allowed. Allowed families are %s.\" %\n (license_family, comma_join(sorted(allowed_license_families)))))\n\n\ndef ensure_valid_fields(meta):\n try:\n pin_depends = meta['build']['pin_depends']\n except KeyError:\n pin_depends = ''\n if pin_depends not in ('', 'record', 'strict'):\n raise RuntimeError(\"build/pin_depends cannot be '%s'\" % pin_depends)\n\n\ndef parse(data, path=None):\n data = select_lines(data, ns_cfg())\n res = yamlize(data)\n # ensure the result is a dict\n if res is None:\n res = {}\n for field in FIELDS:\n if field not in res:\n continue\n # ensure that empty fields are dicts (otherwise selectors can cause invalid fields)\n if not res[field]:\n res[field] = {}\n if not isinstance(res[field], dict):\n raise RuntimeError(\"The %s field should be a dict, not %s in file %s.\" %\n (field, res[field].__class__.__name__, path))\n\n ensure_valid_fields(res)\n ensure_valid_license_family(res)\n return sanitize(res)\n\n\ntrues = {'y', 'on', 'true', 'yes'}\nfalses = {'n', 'no', 'false', 'off'}\n\ndefault_structs = {\n 'source/patches': list,\n 'build/entry_points': list,\n 'build/script_env': list,\n 'build/features': list,\n 'build/track_features': list,\n 'requirements/build': list,\n 'requirements/run': list,\n 'requirements/conflicts': list,\n 'test/requires': list,\n 'test/files': list,\n 'test/commands': list,\n 'test/imports': list,\n 'package/version': text_type,\n 'build/string': text_type,\n 'build/pin_depends': text_type,\n 'source/svn_rev': text_type,\n 'source/git_tag': text_type,\n 'source/git_branch': text_type,\n 'source/md5': text_type,\n 'source/git_rev': text_type,\n 'source/path': text_type,\n 'source/git_url': text_type,\n 'build/osx_is_app': bool,\n 'build/preserve_egg_dir': bool,\n 'build/binary_relocation': bool,\n 'build/noarch_python': bool,\n 'build/detect_binary_files_with_prefix': bool,\n 'build/skip': bool,\n 'app/own_environment': bool\n}\n\n\ndef sanitize(meta):\n \"\"\"\n Sanitize the meta-data to remove aliases/handle deprecation\n\n \"\"\"\n # make a copy to avoid side-effects\n meta = meta.copy()\n sanitize_funs = [('source', _git_clean), ]\n for section, func in sanitize_funs:\n if section in meta:\n meta[section] = func(meta[section])\n return meta\n\n\ndef _git_clean(source_meta):\n \"\"\"\n Reduce the redundancy in git specification by removing git_tag and\n git_branch.\n\n If one is specified, copy to git_rev.\n\n If more than one field is used to specified, exit\n and complain.\n \"\"\"\n\n git_rev_tags_old = ('git_branch', 'git_tag')\n git_rev = 'git_rev'\n\n git_rev_tags = (git_rev,) + git_rev_tags_old\n\n has_rev_tags = tuple(bool(source_meta.get(tag, text_type())) for\n tag in git_rev_tags)\n if sum(has_rev_tags) > 1:\n msg = \"Error: multiple git_revs:\"\n msg += ', '.join(\"{}\".format(key) for key, has in\n zip(git_rev_tags, has_rev_tags) if has)\n sys.exit(msg)\n\n # make a copy of the input so we have no side-effects\n ret_meta = source_meta.copy()\n # loop over the old versions\n for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):\n # update if needed\n if has:\n ret_meta[git_rev_tags[0]] = ret_meta[key]\n # and remove\n ret_meta.pop(key, None)\n\n return ret_meta\n\n# If you update this please update the example in\n# conda-docs/docs/source/build.rst\nFIELDS = {\n 'package': ['name', 'version'],\n 'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',\n 'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',\n 'hg_url', 'hg_tag',\n 'svn_url', 'svn_rev', 'svn_ignore_externals',\n 
'patches'\n ],\n 'build': ['number', 'string', 'entry_points', 'osx_is_app',\n 'features', 'track_features', 'preserve_egg_dir',\n 'no_link', 'binary_relocation', 'script', 'noarch_python',\n 'has_prefix_files', 'binary_has_prefix_files', 'ignore_prefix_files',\n 'detect_binary_files_with_prefix', 'rpaths', 'script_env',\n 'always_include_files', 'skip', 'msvc_compiler',\n 'pin_depends' # pin_depends is experimental still\n ],\n 'requirements': ['build', 'run', 'conflicts'],\n 'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',\n 'own_environment'],\n 'test': ['requires', 'commands', 'files', 'imports'],\n 'about': ['home', 'dev_url', 'doc_url', 'license_url', # these are URLs\n 'license', 'summary', 'description', 'license_family', # text\n 'license_file', 'readme', # paths in source tree\n ],\n}\n\n\ndef check_bad_chrs(s, field):\n bad_chrs = '=!@#$%^&*:;\"\\'\\\\|<>?/ '\n if field in ('package/version', 'build/string'):\n bad_chrs += '-'\n for c in bad_chrs:\n if c in s:\n sys.exit(\"Error: bad character '%s' in %s: %s\" % (c, field, s))\n\n\ndef handle_config_version(ms, ver, dep_type='run'):\n \"\"\"\n 'ms' is an instance of MatchSpec, and 'ver' is the version from the\n configuration, e.g. for ms.name == 'python', ver = 26 or None,\n return a (sometimes new) MatchSpec object\n \"\"\"\n if ms.strictness == 3:\n return ms\n\n if ms.strictness == 2:\n if ms.spec.split()[1] == 'x.x':\n if ver is None:\n raise RuntimeError(\"'%s' requires external setting\" % ms.spec)\n # (no return here - proceeds below)\n else: # regular version\n return ms\n\n # If we don't have a configured version, or we are dealing with a simple\n # numpy runtime dependency; just use \"numpy\"/the name of the package as\n # the specification. In practice this means that a recipe which just\n # defines numpy as a runtime dependency will match any version of numpy\n # at install time.\n if ver is None or (dep_type == 'run' and ms.strictness == 1 and\n ms.name == 'numpy'):\n return MatchSpec(ms.name)\n\n ver = text_type(ver)\n if '.' not in ver:\n if ms.name == 'numpy':\n ver = '%s.%s' % (ver[0], ver[1:])\n else:\n ver = '.'.join(ver)\n return MatchSpec('%s %s*' % (ms.name, ver))\n\n\nclass MetaData(object):\n\n def __init__(self, path):\n assert isdir(path)\n self.path = path\n self.meta_path = join(path, 'meta.yaml')\n self.requirements_path = join(path, 'requirements.txt')\n if not isfile(self.meta_path):\n self.meta_path = join(path, 'conda.yaml')\n if not isfile(self.meta_path):\n sys.exit(\"Error: meta.yaml or conda.yaml not found in %s\" % path)\n\n # Start with bare-minimum contents so we can call environ.get_dict() with impunity\n # We'll immediately replace these contents in parse_again()\n self.meta = parse(\"package:\\n\"\n \" name: uninitialized\", path=self.meta_path)\n\n # This is the 'first pass' parse of meta.yaml, so not all variables are defined yet\n # (e.g. GIT_FULL_HASH, etc. are undefined)\n # Therefore, undefined jinja variables are permitted here\n # In the second pass, we'll be more strict. 
See build.build()\n self.undefined_jinja_vars = []\n self.parse_again(permit_undefined_jinja=True)\n\n def parse_again(self, permit_undefined_jinja=False):\n \"\"\"Redo parsing for key-value pairs that are not initialized in the\n first pass.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n \"\"\"\n if not self.meta_path:\n return\n\n self.meta = parse(self._get_contents(permit_undefined_jinja), path=self.meta_path)\n\n if (isfile(self.requirements_path) and\n not self.meta['requirements']['run']):\n self.meta.setdefault('requirements', {})\n run_requirements = specs_from_url(self.requirements_path)\n self.meta['requirements']['run'] = run_requirements\n\n @classmethod\n def fromdict(cls, metadata):\n \"\"\"\n Create a MetaData object from metadata dict directly.\n \"\"\"\n m = super(MetaData, cls).__new__(cls)\n m.path = ''\n m.meta_path = ''\n m.meta = sanitize(metadata)\n return m\n\n def get_section(self, section):\n return self.meta.get(section, {})\n\n def get_value(self, field, default=None, autotype=True):\n \"\"\"\n Get a value from a meta.yaml.\n :param field: Field to return\n :param default: Default object to return if field doesn't exist\n :param autotype: If True, return the default type of field if one exists.\n False will return the default object.\n :return:\n \"\"\"\n section, key = field.split('/')\n\n # get correct default\n if autotype and default is None and field in default_structs:\n default = default_structs[field]()\n\n value = self.get_section(section).get(key, default)\n\n # handle yaml 1.1 boolean values\n if isinstance(value, text_type):\n if value.lower() in trues:\n value = True\n elif value.lower() in falses:\n value = False\n\n return value\n\n def check_fields(self):\n for section, submeta in iteritems(self.meta):\n if section == 'extra':\n continue\n if section not in FIELDS:\n sys.exit(\"Error: unknown section: %s\" % section)\n for key in submeta:\n if key not in FIELDS[section]:\n sys.exit(\"Error: in section %r: unknown key %r\" %\n (section, key))\n\n def name(self):\n res = self.get_value('package/name')\n if not res:\n sys.exit('Error: package/name missing in: %r' % self.meta_path)\n res = text_type(res)\n if res != res.lower():\n sys.exit('Error: package/name must be lowercase, got: %r' % res)\n check_bad_chrs(res, 'package/name')\n return res\n\n def version(self):\n res = self.get_value('package/version')\n if res is None:\n sys.exit(\"Error: package/version missing in: %r\" % self.meta_path)\n check_bad_chrs(res, 'package/version')\n return res\n\n def build_number(self):\n return int(self.get_value('build/number', 0))\n\n def ms_depends(self, typ='run'):\n res = []\n name_ver_list = [\n ('python', config.CONDA_PY),\n ('numpy', config.CONDA_NPY),\n ('perl', config.CONDA_PERL),\n ('lua', config.CONDA_LUA),\n # r is kept for legacy installations, r-base deprecates it.\n ('r', config.CONDA_R),\n ('r-base', config.CONDA_R),\n ]\n for spec in self.get_value('requirements/' + typ, []):\n try:\n ms = MatchSpec(spec)\n except AssertionError:\n raise RuntimeError(\"Invalid package specification: %r\" % spec)\n if ms.name == self.name():\n raise RuntimeError(\"%s cannot depend on itself\" % self.name())\n for name, ver in name_ver_list:\n if ms.name == name:\n if self.get_value('build/noarch_python'):\n continue\n ms = handle_config_version(ms, ver, typ)\n\n for c in '=!@#$%^&*:;\"\\'\\\\|<>?/':\n if c in ms.name:\n sys.exit(\"Error: bad character '%s' in package 
name \"\n \"dependency '%s'\" % (c, ms.name))\n parts = spec.split()\n if len(parts) >= 2:\n if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:\n msg = (\"Error: bad character '%s' in package version \"\n \"dependency '%s'\" % (parts[1], ms.name))\n if len(parts) >= 3:\n msg += \"\\nPerhaps you meant '%s %s%s'\" % (ms.name,\n parts[1], parts[2])\n sys.exit(msg)\n res.append(ms)\n return res\n\n def build_id(self):\n ret = self.get_value('build/string')\n if ret:\n check_bad_chrs(ret, 'build/string')\n return ret\n res = []\n version_pat = re.compile(r'(?:==)?(\\d+)\\.(\\d+)')\n for name, s in (('numpy', 'np'), ('python', 'py'),\n ('perl', 'pl'), ('lua', 'lua'),\n ('r', 'r'), ('r-base', 'r')):\n for ms in self.ms_depends():\n if ms.name == name:\n try:\n v = ms.spec.split()[1]\n except IndexError:\n if name not in ['numpy']:\n res.append(s)\n break\n if any(i in v for i in ',|>!<'):\n break\n if name not in ['perl', 'lua', 'r', 'r-base']:\n match = version_pat.match(v)\n if match:\n res.append(s + match.group(1) + match.group(2))\n else:\n res.append(s + v.strip('*'))\n break\n\n features = self.get_value('build/features', [])\n if res:\n res.append('_')\n if features:\n res.extend(('_'.join(features), '_'))\n res.append('%d' % self.build_number())\n return ''.join(res)\n\n def dist(self):\n return '%s-%s-%s' % (self.name(), self.version(), self.build_id())\n\n def pkg_fn(self):\n return \"%s.tar.bz2\" % self.dist()\n\n def is_app(self):\n return bool(self.get_value('app/entry'))\n\n def app_meta(self):\n d = {'type': 'app'}\n if self.get_value('app/icon'):\n d['icon'] = '%s.png' % md5_file(join(\n self.path, self.get_value('app/icon')))\n\n for field, key in [('app/entry', 'app_entry'),\n ('app/type', 'app_type'),\n ('app/cli_opts', 'app_cli_opts'),\n ('app/summary', 'summary'),\n ('app/own_environment', 'app_own_environment')]:\n value = self.get_value(field)\n if value:\n d[key] = value\n return d\n\n def info_index(self):\n d = dict(\n name=self.name(),\n version=self.version(),\n build=self.build_id(),\n build_number=self.build_number(),\n platform=cc.platform,\n arch=cc.arch_name,\n subdir=cc.subdir,\n depends=sorted(' '.join(ms.spec.split())\n for ms in self.ms_depends()),\n )\n for key in ('license', 'license_family'):\n value = self.get_value('about/' + key)\n if value:\n d[key] = value\n\n if self.get_value('build/features'):\n d['features'] = ' '.join(self.get_value('build/features'))\n if self.get_value('build/track_features'):\n d['track_features'] = ' '.join(self.get_value('build/track_features'))\n if self.get_value('build/noarch_python'):\n d['platform'] = d['arch'] = None\n d['subdir'] = 'noarch'\n if self.is_app():\n d.update(self.app_meta())\n return d\n\n def has_prefix_files(self):\n ret = self.get_value('build/has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/has_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def ignore_prefix_files(self):\n ret = self.get_value('build/ignore_prefix_files', False)\n if type(ret) not in (list, bool):\n raise RuntimeError('build/ignore_prefix_files should be boolean or a list of paths')\n if sys.platform == 'win32':\n if type(ret) is list and any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/ignore_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def 
always_include_files(self):\n return self.get_value('build/always_include_files', [])\n\n def binary_has_prefix_files(self):\n ret = self.get_value('build/binary_has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/binary_has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/binary_has_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def skip(self):\n return self.get_value('build/skip', False)\n\n def _get_contents(self, permit_undefined_jinja):\n '''\n Get the contents of our [meta.yaml|conda.yaml] file.\n If jinja is installed, then the template.render function is called\n before standard conda macro processors.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n '''\n try:\n import jinja2\n except ImportError:\n print(\"There was an error importing jinja2.\", file=sys.stderr)\n print(\"Please run `conda install jinja2` to enable jinja template support\", file=sys.stderr) # noqa\n with open(self.meta_path) as fd:\n return fd.read()\n\n from conda_build.jinja_context import context_processor, UndefinedNeverFail, FilteredLoader\n\n path, filename = os.path.split(self.meta_path)\n loaders = [ # search relative to '<conda_root>/Lib/site-packages/conda_build/templates'\n jinja2.PackageLoader('conda_build'),\n # search relative to RECIPE_DIR\n jinja2.FileSystemLoader(path)\n ]\n\n # search relative to current conda environment directory\n conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment\n if conda_env_path and os.path.isdir(conda_env_path):\n conda_env_path = os.path.abspath(conda_env_path)\n conda_env_path = conda_env_path.replace('\\\\', '/') # need unix-style path\n env_loader = jinja2.FileSystemLoader(conda_env_path)\n loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))\n\n undefined_type = jinja2.StrictUndefined\n if permit_undefined_jinja:\n # The UndefinedNeverFail class keeps a global list of all undefined names\n # Clear any leftover names from the last parse.\n UndefinedNeverFail.all_undefined_names = []\n undefined_type = UndefinedNeverFail\n\n loader = FilteredLoader(jinja2.ChoiceLoader(loaders))\n env = jinja2.Environment(loader=loader, undefined=undefined_type)\n\n env.globals.update(ns_cfg())\n env.globals.update(context_processor(self, path))\n\n try:\n template = env.get_or_select_template(filename)\n rendered = template.render(environment=env)\n\n if permit_undefined_jinja:\n self.undefined_jinja_vars = UndefinedNeverFail.all_undefined_names\n else:\n self.undefined_jinja_vars = []\n\n return rendered\n except jinja2.TemplateError as ex:\n sys.exit(\"Error: Failed to render jinja template in {}:\\n{}\"\n .format(self.meta_path, ex.message))\n\n def __unicode__(self):\n '''\n String representation of the MetaData.\n '''\n return text_type(self.__dict__)\n\n def __str__(self):\n if PY3:\n return self.__unicode__()\n else:\n return self.__unicode__().encode('utf-8')\n\n def __repr__(self):\n '''\n String representation of the MetaData.\n '''\n return self.__str__()\n\n def uses_vcs_in_meta(self):\n \"\"\"returns true if recipe contains metadata associated with version control systems.\n If this metadata is present, a download/copy will be forced in parse_or_try_download.\n \"\"\"\n vcs_types = [\"git\", \"svn\", \"hg\"]\n if \"source\" in self.meta:\n for vcs in vcs_types:\n if 
vcs + \"_url\" in self.meta[\"source\"]:\n # translate command name to package name.\n # If more than hg, need a dict for this.\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n\n # We would get here if we use Jinja2 templating, but specify source with path.\n with open(self.meta_path) as f:\n metayaml = f.read()\n for vcs in vcs_types:\n matches = re.findall(r\"{}_[^\\.\\s\\'\\\"]+\".format(vcs.upper()), metayaml)\n if len(matches) > 0:\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n return None\n\n def uses_vcs_in_build(self):\n build_script = \"bld.bat\" if on_win else \"build.sh\"\n build_script = os.path.join(os.path.dirname(self.meta_path), build_script)\n if os.path.isfile(build_script):\n vcs_types = [\"git\", \"svn\", \"hg\"]\n with open(self.meta_path) as f:\n build_script = f.read()\n for vcs in vcs_types:\n matches = re.findall(r\"{}(?:\\.exe)?\".format(vcs), build_script)\n if len(matches) > 0:\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n return None\n",
"path": "conda_build/metadata.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport re\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom conda.compat import iteritems, PY3, text_type\nfrom conda.utils import memoized, md5_file\nimport conda.config as cc\nfrom conda.resolve import MatchSpec\nfrom conda.cli.common import specs_from_url\n\nfrom conda_build import exceptions\nfrom conda_build.features import feature_list\n\ntry:\n import yaml\n\n # try to import C loader\n try:\n from yaml import CBaseLoader as BaseLoader\n except ImportError:\n from yaml import BaseLoader\nexcept ImportError:\n sys.exit('Error: could not import yaml (required to read meta.yaml '\n 'files of conda recipes)')\n\nfrom conda_build.config import config\nfrom conda_build.utils import comma_join\n\non_win = (sys.platform == 'win32')\n\n\ndef ns_cfg():\n # Remember to update the docs of any of this changes\n plat = cc.subdir\n py = config.CONDA_PY\n np = config.CONDA_NPY\n pl = config.CONDA_PERL\n lua = config.CONDA_LUA\n assert isinstance(py, int), py\n d = dict(\n linux=plat.startswith('linux-'),\n linux32=bool(plat == 'linux-32'),\n linux64=bool(plat == 'linux-64'),\n arm=plat.startswith('linux-arm'),\n osx=plat.startswith('osx-'),\n unix=plat.startswith(('linux-', 'osx-')),\n win=plat.startswith('win-'),\n win32=bool(plat == 'win-32'),\n win64=bool(plat == 'win-64'),\n x86=plat.endswith(('-32', '-64')),\n x86_64=plat.endswith('-64'),\n pl=pl,\n py=py,\n lua=lua,\n luajit=bool(lua[0] == \"2\"),\n py3k=bool(30 <= py < 40),\n py2k=bool(20 <= py < 30),\n py26=bool(py == 26),\n py27=bool(py == 27),\n py33=bool(py == 33),\n py34=bool(py == 34),\n py35=bool(py == 35),\n np=np,\n os=os,\n environ=os.environ,\n )\n for machine in cc.non_x86_linux_machines:\n d[machine] = bool(plat == 'linux-%s' % machine)\n\n for feature, value in feature_list:\n d[feature] = value\n d.update(os.environ)\n return d\n\n\n# Selectors must be either:\n# - at end of the line\n# - embedded (anywhere) within a comment\n#\n# Notes:\n# - [([^\\[\\]]+)\\] means \"find a pair of brackets containing any\n# NON-bracket chars, and capture the contents\"\n# - (?(2).*)$ means \"allow trailing characters iff group 2 (#.*) was found.\"\nsel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[([^\\[\\]]+)\\](?(2).*)$')\n\n\ndef select_lines(data, namespace):\n lines = []\n for i, line in enumerate(data.splitlines()):\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n cond = m.group(3)\n try:\n if eval(cond, namespace, {}):\n lines.append(m.group(1))\n except:\n sys.exit('''\\\nError: Invalid selector in meta.yaml line %d:\n%s\n''' % (i + 1, line))\n sys.exit(1)\n continue\n lines.append(line)\n return '\\n'.join(lines) + '\\n'\n\n\n@memoized\ndef yamlize(data):\n try:\n return yaml.load(data, Loader=BaseLoader)\n except yaml.error.YAMLError as e:\n if '{{' in data:\n try:\n import jinja2\n jinja2 # Avoid pyflakes failure: 'jinja2' imported but unused\n except ImportError:\n raise exceptions.UnableToParseMissingJinja2(original=e)\n raise exceptions.UnableToParse(original=e)\n\n\nallowed_license_families = set(\"\"\"\nAGPL\nApache\nBSD\nGPL2\nGPL3\nLGPL\nMIT\nOther\nPSF\nProprietary\nPublic-Domain\n\"\"\".split())\n\n\ndef ensure_valid_license_family(meta):\n try:\n license_family = meta['about']['license_family']\n except KeyError:\n return\n if license_family not in allowed_license_families:\n raise RuntimeError(exceptions.indent(\n 
\"about/license_family '%s' not allowed. Allowed families are %s.\" %\n (license_family, comma_join(sorted(allowed_license_families)))))\n\n\ndef ensure_valid_fields(meta):\n try:\n pin_depends = meta['build']['pin_depends']\n except KeyError:\n pin_depends = ''\n if pin_depends not in ('', 'record', 'strict'):\n raise RuntimeError(\"build/pin_depends cannot be '%s'\" % pin_depends)\n\n\ndef parse(data, path=None):\n data = select_lines(data, ns_cfg())\n res = yamlize(data)\n # ensure the result is a dict\n if res is None:\n res = {}\n for field in FIELDS:\n if field not in res:\n continue\n # ensure that empty fields are dicts (otherwise selectors can cause invalid fields)\n if not res[field]:\n res[field] = {}\n if not isinstance(res[field], dict):\n raise RuntimeError(\"The %s field should be a dict, not %s in file %s.\" %\n (field, res[field].__class__.__name__, path))\n\n ensure_valid_fields(res)\n ensure_valid_license_family(res)\n return sanitize(res)\n\n\ntrues = {'y', 'on', 'true', 'yes'}\nfalses = {'n', 'no', 'false', 'off'}\n\ndefault_structs = {\n 'source/patches': list,\n 'build/entry_points': list,\n 'build/script_env': list,\n 'build/features': list,\n 'build/track_features': list,\n 'requirements/build': list,\n 'requirements/run': list,\n 'requirements/conflicts': list,\n 'test/requires': list,\n 'test/files': list,\n 'test/commands': list,\n 'test/imports': list,\n 'package/version': text_type,\n 'build/string': text_type,\n 'build/pin_depends': text_type,\n 'source/svn_rev': text_type,\n 'source/git_tag': text_type,\n 'source/git_branch': text_type,\n 'source/md5': text_type,\n 'source/git_rev': text_type,\n 'source/path': text_type,\n 'source/git_url': text_type,\n 'build/osx_is_app': bool,\n 'build/preserve_egg_dir': bool,\n 'build/binary_relocation': bool,\n 'build/noarch_python': bool,\n 'build/detect_binary_files_with_prefix': bool,\n 'build/skip': bool,\n 'app/own_environment': bool\n}\n\n\ndef sanitize(meta):\n \"\"\"\n Sanitize the meta-data to remove aliases/handle deprecation\n\n \"\"\"\n # make a copy to avoid side-effects\n meta = meta.copy()\n sanitize_funs = [('source', _git_clean), ]\n for section, func in sanitize_funs:\n if section in meta:\n meta[section] = func(meta[section])\n return meta\n\n\ndef _git_clean(source_meta):\n \"\"\"\n Reduce the redundancy in git specification by removing git_tag and\n git_branch.\n\n If one is specified, copy to git_rev.\n\n If more than one field is used to specified, exit\n and complain.\n \"\"\"\n\n git_rev_tags_old = ('git_branch', 'git_tag')\n git_rev = 'git_rev'\n\n git_rev_tags = (git_rev,) + git_rev_tags_old\n\n has_rev_tags = tuple(bool(source_meta.get(tag, text_type())) for\n tag in git_rev_tags)\n if sum(has_rev_tags) > 1:\n msg = \"Error: multiple git_revs:\"\n msg += ', '.join(\"{}\".format(key) for key, has in\n zip(git_rev_tags, has_rev_tags) if has)\n sys.exit(msg)\n\n # make a copy of the input so we have no side-effects\n ret_meta = source_meta.copy()\n # loop over the old versions\n for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):\n # update if needed\n if has:\n ret_meta[git_rev_tags[0]] = ret_meta[key]\n # and remove\n ret_meta.pop(key, None)\n\n return ret_meta\n\n# If you update this please update the example in\n# conda-docs/docs/source/build.rst\nFIELDS = {\n 'package': ['name', 'version'],\n 'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',\n 'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',\n 'hg_url', 'hg_tag',\n 'svn_url', 'svn_rev', 'svn_ignore_externals',\n 
'patches'\n ],\n 'build': ['number', 'string', 'entry_points', 'osx_is_app',\n 'features', 'track_features', 'preserve_egg_dir',\n 'no_link', 'binary_relocation', 'script', 'noarch_python',\n 'has_prefix_files', 'binary_has_prefix_files', 'ignore_prefix_files',\n 'detect_binary_files_with_prefix', 'rpaths', 'script_env',\n 'always_include_files', 'skip', 'msvc_compiler',\n 'pin_depends' # pin_depends is experimental still\n ],\n 'requirements': ['build', 'run', 'conflicts'],\n 'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',\n 'own_environment'],\n 'test': ['requires', 'commands', 'files', 'imports'],\n 'about': ['home', 'dev_url', 'doc_url', 'license_url', # these are URLs\n 'license', 'summary', 'description', 'license_family', # text\n 'license_file', 'readme', # paths in source tree\n ],\n}\n\n\ndef check_bad_chrs(s, field):\n bad_chrs = '=!@#$%^&*:;\"\\'\\\\|<>?/ '\n if field in ('package/version', 'build/string'):\n bad_chrs += '-'\n for c in bad_chrs:\n if c in s:\n sys.exit(\"Error: bad character '%s' in %s: %s\" % (c, field, s))\n\n\ndef handle_config_version(ms, ver, dep_type='run'):\n \"\"\"\n 'ms' is an instance of MatchSpec, and 'ver' is the version from the\n configuration, e.g. for ms.name == 'python', ver = 26 or None,\n return a (sometimes new) MatchSpec object\n \"\"\"\n if ms.strictness == 3:\n return ms\n\n if ms.strictness == 2:\n if ms.spec.split()[1] == 'x.x':\n if ver is None:\n raise RuntimeError(\"'%s' requires external setting\" % ms.spec)\n # (no return here - proceeds below)\n else: # regular version\n return ms\n\n # If we don't have a configured version, or we are dealing with a simple\n # numpy runtime dependency; just use \"numpy\"/the name of the package as\n # the specification. In practice this means that a recipe which just\n # defines numpy as a runtime dependency will match any version of numpy\n # at install time.\n if ver is None or (dep_type == 'run' and ms.strictness == 1 and\n ms.name == 'numpy'):\n return MatchSpec(ms.name)\n\n ver = text_type(ver)\n if '.' not in ver:\n if ms.name == 'numpy':\n ver = '%s.%s' % (ver[0], ver[1:])\n else:\n ver = '.'.join(ver)\n return MatchSpec('%s %s*' % (ms.name, ver))\n\n\nclass MetaData(object):\n\n def __init__(self, path):\n assert isdir(path)\n self.path = path\n self.meta_path = join(path, 'meta.yaml')\n self.requirements_path = join(path, 'requirements.txt')\n if not isfile(self.meta_path):\n self.meta_path = join(path, 'conda.yaml')\n if not isfile(self.meta_path):\n sys.exit(\"Error: meta.yaml or conda.yaml not found in %s\" % path)\n\n # Start with bare-minimum contents so we can call environ.get_dict() with impunity\n # We'll immediately replace these contents in parse_again()\n self.meta = parse(\"package:\\n\"\n \" name: uninitialized\", path=self.meta_path)\n\n # This is the 'first pass' parse of meta.yaml, so not all variables are defined yet\n # (e.g. GIT_FULL_HASH, etc. are undefined)\n # Therefore, undefined jinja variables are permitted here\n # In the second pass, we'll be more strict. 
See build.build()\n self.undefined_jinja_vars = []\n self.parse_again(permit_undefined_jinja=True)\n\n def parse_again(self, permit_undefined_jinja=False):\n \"\"\"Redo parsing for key-value pairs that are not initialized in the\n first pass.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n \"\"\"\n if not self.meta_path:\n return\n\n self.meta = parse(self._get_contents(permit_undefined_jinja), path=self.meta_path)\n\n if (isfile(self.requirements_path) and\n not self.meta['requirements']['run']):\n self.meta.setdefault('requirements', {})\n run_requirements = specs_from_url(self.requirements_path)\n self.meta['requirements']['run'] = run_requirements\n\n @classmethod\n def fromdict(cls, metadata):\n \"\"\"\n Create a MetaData object from metadata dict directly.\n \"\"\"\n m = super(MetaData, cls).__new__(cls)\n m.path = ''\n m.meta_path = ''\n m.meta = sanitize(metadata)\n return m\n\n def get_section(self, section):\n return self.meta.get(section, {})\n\n def get_value(self, field, default=None, autotype=True):\n \"\"\"\n Get a value from a meta.yaml.\n :param field: Field to return\n :param default: Default object to return if field doesn't exist\n :param autotype: If True, return the default type of field if one exists.\n False will return the default object.\n :return:\n \"\"\"\n section, key = field.split('/')\n\n # get correct default\n if autotype and default is None and field in default_structs:\n default = default_structs[field]()\n\n value = self.get_section(section).get(key, default)\n\n # handle yaml 1.1 boolean values\n if isinstance(value, text_type):\n if value.lower() in trues:\n value = True\n elif value.lower() in falses:\n value = False\n\n return value\n\n def check_fields(self):\n for section, submeta in iteritems(self.meta):\n if section == 'extra':\n continue\n if section not in FIELDS:\n sys.exit(\"Error: unknown section: %s\" % section)\n for key in submeta:\n if key not in FIELDS[section]:\n sys.exit(\"Error: in section %r: unknown key %r\" %\n (section, key))\n\n def name(self):\n res = self.get_value('package/name')\n if not res:\n sys.exit('Error: package/name missing in: %r' % self.meta_path)\n res = text_type(res)\n if res != res.lower():\n sys.exit('Error: package/name must be lowercase, got: %r' % res)\n check_bad_chrs(res, 'package/name')\n return res\n\n def version(self):\n res = self.get_value('package/version')\n if res is None:\n sys.exit(\"Error: package/version missing in: %r\" % self.meta_path)\n check_bad_chrs(res, 'package/version')\n return res\n\n def build_number(self):\n number = self.get_value('build/number', 0)\n # build number can come back as None if no setting (or jinja intermediate)\n return int(number) if number else 0\n\n def ms_depends(self, typ='run'):\n res = []\n name_ver_list = [\n ('python', config.CONDA_PY),\n ('numpy', config.CONDA_NPY),\n ('perl', config.CONDA_PERL),\n ('lua', config.CONDA_LUA),\n # r is kept for legacy installations, r-base deprecates it.\n ('r', config.CONDA_R),\n ('r-base', config.CONDA_R),\n ]\n for spec in self.get_value('requirements/' + typ, []):\n try:\n ms = MatchSpec(spec)\n except AssertionError:\n raise RuntimeError(\"Invalid package specification: %r\" % spec)\n if ms.name == self.name():\n raise RuntimeError(\"%s cannot depend on itself\" % self.name())\n for name, ver in name_ver_list:\n if ms.name == name:\n if self.get_value('build/noarch_python'):\n continue\n ms = handle_config_version(ms, ver, 
typ)\n\n for c in '=!@#$%^&*:;\"\\'\\\\|<>?/':\n if c in ms.name:\n sys.exit(\"Error: bad character '%s' in package name \"\n \"dependency '%s'\" % (c, ms.name))\n parts = spec.split()\n if len(parts) >= 2:\n if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:\n msg = (\"Error: bad character '%s' in package version \"\n \"dependency '%s'\" % (parts[1], ms.name))\n if len(parts) >= 3:\n msg += \"\\nPerhaps you meant '%s %s%s'\" % (ms.name,\n parts[1], parts[2])\n sys.exit(msg)\n res.append(ms)\n return res\n\n def build_id(self):\n ret = self.get_value('build/string')\n if ret:\n check_bad_chrs(ret, 'build/string')\n return ret\n res = []\n version_pat = re.compile(r'(?:==)?(\\d+)\\.(\\d+)')\n for name, s in (('numpy', 'np'), ('python', 'py'),\n ('perl', 'pl'), ('lua', 'lua'),\n ('r', 'r'), ('r-base', 'r')):\n for ms in self.ms_depends():\n if ms.name == name:\n try:\n v = ms.spec.split()[1]\n except IndexError:\n if name not in ['numpy']:\n res.append(s)\n break\n if any(i in v for i in ',|>!<'):\n break\n if name not in ['perl', 'lua', 'r', 'r-base']:\n match = version_pat.match(v)\n if match:\n res.append(s + match.group(1) + match.group(2))\n else:\n res.append(s + v.strip('*'))\n break\n\n features = self.get_value('build/features', [])\n if res:\n res.append('_')\n if features:\n res.extend(('_'.join(features), '_'))\n res.append('%d' % self.build_number())\n return ''.join(res)\n\n def dist(self):\n return '%s-%s-%s' % (self.name(), self.version(), self.build_id())\n\n def pkg_fn(self):\n return \"%s.tar.bz2\" % self.dist()\n\n def is_app(self):\n return bool(self.get_value('app/entry'))\n\n def app_meta(self):\n d = {'type': 'app'}\n if self.get_value('app/icon'):\n d['icon'] = '%s.png' % md5_file(join(\n self.path, self.get_value('app/icon')))\n\n for field, key in [('app/entry', 'app_entry'),\n ('app/type', 'app_type'),\n ('app/cli_opts', 'app_cli_opts'),\n ('app/summary', 'summary'),\n ('app/own_environment', 'app_own_environment')]:\n value = self.get_value(field)\n if value:\n d[key] = value\n return d\n\n def info_index(self):\n d = dict(\n name=self.name(),\n version=self.version(),\n build=self.build_id(),\n build_number=self.build_number(),\n platform=cc.platform,\n arch=cc.arch_name,\n subdir=cc.subdir,\n depends=sorted(' '.join(ms.spec.split())\n for ms in self.ms_depends()),\n )\n for key in ('license', 'license_family'):\n value = self.get_value('about/' + key)\n if value:\n d[key] = value\n\n if self.get_value('build/features'):\n d['features'] = ' '.join(self.get_value('build/features'))\n if self.get_value('build/track_features'):\n d['track_features'] = ' '.join(self.get_value('build/track_features'))\n if self.get_value('build/noarch_python'):\n d['platform'] = d['arch'] = None\n d['subdir'] = 'noarch'\n if self.is_app():\n d.update(self.app_meta())\n return d\n\n def has_prefix_files(self):\n ret = self.get_value('build/has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/has_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def ignore_prefix_files(self):\n ret = self.get_value('build/ignore_prefix_files', False)\n if type(ret) not in (list, bool):\n raise RuntimeError('build/ignore_prefix_files should be boolean or a list of paths')\n if sys.platform == 'win32':\n if type(ret) is list and any('\\\\' in i for i in ret):\n raise 
RuntimeError(\"build/ignore_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def always_include_files(self):\n return self.get_value('build/always_include_files', [])\n\n def binary_has_prefix_files(self):\n ret = self.get_value('build/binary_has_prefix_files', [])\n if not isinstance(ret, list):\n raise RuntimeError('build/binary_has_prefix_files should be a list of paths')\n if sys.platform == 'win32':\n if any('\\\\' in i for i in ret):\n raise RuntimeError(\"build/binary_has_prefix_files paths must use / \"\n \"as the path delimiter on Windows\")\n return ret\n\n def skip(self):\n return self.get_value('build/skip', False)\n\n def _get_contents(self, permit_undefined_jinja):\n '''\n Get the contents of our [meta.yaml|conda.yaml] file.\n If jinja is installed, then the template.render function is called\n before standard conda macro processors.\n\n permit_undefined_jinja: If True, *any* use of undefined jinja variables will\n evaluate to an emtpy string, without emitting an error.\n '''\n try:\n import jinja2\n except ImportError:\n print(\"There was an error importing jinja2.\", file=sys.stderr)\n print(\"Please run `conda install jinja2` to enable jinja template support\", file=sys.stderr) # noqa\n with open(self.meta_path) as fd:\n return fd.read()\n\n from conda_build.jinja_context import context_processor, UndefinedNeverFail, FilteredLoader\n\n path, filename = os.path.split(self.meta_path)\n loaders = [ # search relative to '<conda_root>/Lib/site-packages/conda_build/templates'\n jinja2.PackageLoader('conda_build'),\n # search relative to RECIPE_DIR\n jinja2.FileSystemLoader(path)\n ]\n\n # search relative to current conda environment directory\n conda_env_path = os.environ.get('CONDA_DEFAULT_ENV') # path to current conda environment\n if conda_env_path and os.path.isdir(conda_env_path):\n conda_env_path = os.path.abspath(conda_env_path)\n conda_env_path = conda_env_path.replace('\\\\', '/') # need unix-style path\n env_loader = jinja2.FileSystemLoader(conda_env_path)\n loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))\n\n undefined_type = jinja2.StrictUndefined\n if permit_undefined_jinja:\n # The UndefinedNeverFail class keeps a global list of all undefined names\n # Clear any leftover names from the last parse.\n UndefinedNeverFail.all_undefined_names = []\n undefined_type = UndefinedNeverFail\n\n loader = FilteredLoader(jinja2.ChoiceLoader(loaders))\n env = jinja2.Environment(loader=loader, undefined=undefined_type)\n\n env.globals.update(ns_cfg())\n env.globals.update(context_processor(self, path))\n\n try:\n template = env.get_or_select_template(filename)\n rendered = template.render(environment=env)\n\n if permit_undefined_jinja:\n self.undefined_jinja_vars = UndefinedNeverFail.all_undefined_names\n else:\n self.undefined_jinja_vars = []\n\n return rendered\n except jinja2.TemplateError as ex:\n sys.exit(\"Error: Failed to render jinja template in {}:\\n{}\"\n .format(self.meta_path, ex.message))\n\n def __unicode__(self):\n '''\n String representation of the MetaData.\n '''\n return text_type(self.__dict__)\n\n def __str__(self):\n if PY3:\n return self.__unicode__()\n else:\n return self.__unicode__().encode('utf-8')\n\n def __repr__(self):\n '''\n String representation of the MetaData.\n '''\n return self.__str__()\n\n def uses_vcs_in_meta(self):\n \"\"\"returns true if recipe contains metadata associated with version control systems.\n If this metadata is present, a download/copy will be forced in 
parse_or_try_download.\n \"\"\"\n vcs_types = [\"git\", \"svn\", \"hg\"]\n if \"source\" in self.meta:\n for vcs in vcs_types:\n if vcs + \"_url\" in self.meta[\"source\"]:\n # translate command name to package name.\n # If more than hg, need a dict for this.\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n\n # We would get here if we use Jinja2 templating, but specify source with path.\n with open(self.meta_path) as f:\n metayaml = f.read()\n for vcs in vcs_types:\n matches = re.findall(r\"{}_[^\\.\\s\\'\\\"]+\".format(vcs.upper()), metayaml)\n if len(matches) > 0:\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n return None\n\n def uses_vcs_in_build(self):\n build_script = \"bld.bat\" if on_win else \"build.sh\"\n build_script = os.path.join(os.path.dirname(self.meta_path), build_script)\n if os.path.isfile(build_script):\n vcs_types = [\"git\", \"svn\", \"hg\"]\n with open(self.meta_path) as f:\n build_script = f.read()\n for vcs in vcs_types:\n matches = re.findall(r\"{}(?:\\.exe)?\".format(vcs), build_script)\n if len(matches) > 0:\n if vcs == \"hg\":\n vcs = \"mercurial\"\n return vcs\n return None\n",
"path": "conda_build/metadata.py"
}
] | diff --git a/conda_build/metadata.py b/conda_build/metadata.py
index 56eb7f011a..a96c073f1c 100644
--- a/conda_build/metadata.py
+++ b/conda_build/metadata.py
@@ -451,7 +451,9 @@ def version(self):
return res
def build_number(self):
- return int(self.get_value('build/number', 0))
+ number = self.get_value('build/number', 0)
+ # build number can come back as None if no setting (or jinja intermediate)
+ return int(number) if number else 0
def ms_depends(self, typ='run'):
res = []
diff --git a/tests/test-recipes/metadata/_git_describe_number_branch/meta.yaml b/tests/test-recipes/metadata/_git_describe_number_branch/meta.yaml
new file mode 100644
index 0000000000..f8b22a7d59
--- /dev/null
+++ b/tests/test-recipes/metadata/_git_describe_number_branch/meta.yaml
@@ -0,0 +1,11 @@
+package:
+ name: git_describe_number_branch
+ version: {{ GIT_DESCRIBE_TAG }}
+
+source:
+ git_url: https://github.com/conda/conda_build_test_recipe
+ git_branch: 1.20.2+1
+
+build:
+ number: {{ GIT_DESCRIBE_NUMBER }}
+ string: {{ GIT_BUILD_STR }}
diff --git a/tests/test_build_recipes.py b/tests/test_build_recipes.py
index 7df353c140..2e67a3360d 100644
--- a/tests/test_build_recipes.py
+++ b/tests/test_build_recipes.py
@@ -377,3 +377,15 @@ def test_patch():
lines = modified.readlines()
assert lines[0] == '43770\n'
os.chdir(basedir)
+
+
+def test_git_describe_info_on_branch():
+ cmd = 'conda build --output {}'.format(os.path.join(metadata_dir, "_git_describe_number_branch"))
+ process = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ test_path = os.path.join(sys.prefix, "conda-bld", subdir,
+ "git_describe_number_branch-1.20.2-1_g82c6ba6.tar.bz2")
+ output = output.decode('utf-8').rstrip()
+ error = error.decode('utf-8')
+ assert test_path == output, error
|
beetbox__beets-3703 | Minor documentation correction: correct id3.org url
https://github.com/beetbox/beets/blob/master/docs/faq.rst#L303
refers to:
http://www.id3.org/id3v2.4.0-structure
as a reference URL for a copy of the ID3v2.4 standard documentation, but this returns a "Not found" error. I've found two possible replacements:
https://id3.org/id3v2.4.0-structure
(with adverts) or
https://github.com/id3/ID3v2.4/raw/master/id3v2.40-structure.txt
(without adverts)
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, print_function\n\nAUTHOR = u'Adrian Sampson'\n\n# General configuration\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']\n\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'beets'\ncopyright = u'2016, Adrian Sampson'\n\nversion = '1.5'\nrelease = '1.5.0'\n\npygments_style = 'sphinx'\n\n# External links to the bug tracker and other sites.\nextlinks = {\n 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),\n 'user': ('https://github.com/%s', ''),\n 'pypi': ('https://pypi.org/project/%s/', ''),\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n}\n\n# Options for HTML output\nhtmlhelp_basename = 'beetsdoc'\n\n# Options for LaTeX output\nlatex_documents = [\n ('index', 'beets.tex', u'beets Documentation',\n AUTHOR, 'manual'),\n]\n\n# Options for manual page output\nman_pages = [\n ('reference/cli', 'beet', u'music tagger and library organizer',\n [AUTHOR], 1),\n ('reference/config', 'beetsconfig', u'beets configuration file',\n [AUTHOR], 5),\n]\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, print_function\n\nAUTHOR = u'Adrian Sampson'\n\n# General configuration\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']\n\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'beets'\ncopyright = u'2016, Adrian Sampson'\n\nversion = '1.5'\nrelease = '1.5.0'\n\npygments_style = 'sphinx'\n\n# External links to the bug tracker and other sites.\nextlinks = {\n 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),\n 'user': ('https://github.com/%s', ''),\n 'pypi': ('https://pypi.org/project/%s/', ''),\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n}\n\nlinkcheck_ignore = [\n r'https://github.com/beetbox/beets/issues/',\n r'https://github.com/\\w+$', # ignore user pages\n r'.*localhost.*',\n r'https://www.musixmatch.com/', # blocks requests\n]\n\n# Options for HTML output\nhtmlhelp_basename = 'beetsdoc'\n\n# Options for LaTeX output\nlatex_documents = [\n ('index', 'beets.tex', u'beets Documentation',\n AUTHOR, 'manual'),\n]\n\n# Options for manual page output\nman_pages = [\n ('reference/cli', 'beet', u'music tagger and library organizer',\n [AUTHOR], 1),\n ('reference/config', 'beetsconfig', u'beets configuration file',\n [AUTHOR], 5),\n]\n",
"path": "docs/conf.py"
}
] | diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml
index 386571e5a4..633947fd3b 100644
--- a/.github/workflows/integration_test.yaml
+++ b/.github/workflows/integration_test.yaml
@@ -27,6 +27,10 @@ jobs:
run: |
tox -e int
+ - name: Check external links in docs
+ run: |
+ tox -e links
+
- name: Notify on failure
if: ${{ failure() }}
env:
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index d86c490b93..9600ee966b 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -28,7 +28,7 @@ Non-Programming
- Promote beets! Help get the word out by telling your friends, writing
a blog post, or discussing it on a forum you frequent.
-- Improve the `documentation <http://beets.readthedocs.org/>`__. It’s
+- Improve the `documentation`_. It’s
incredibly easy to contribute here: just find a page you want to
modify and hit the “Edit on GitHub” button in the upper-right. You
can automatically send us a pull request for your changes.
@@ -62,7 +62,7 @@ Getting the Source
^^^^^^^^^^^^^^^^^^
The easiest way to get started with the latest beets source is to use
-`pip <https://pip.pypa.io/>`__ to install an “editable” package. This
+`pip`_ to install an “editable” package. This
can be done with one command:
.. code-block:: bash
@@ -147,8 +147,7 @@ request and your code will ship in no time.
5. Add a changelog entry to ``docs/changelog.rst`` near the top of the
document.
6. Run the tests and style checker. The easiest way to run the tests is
- to use `tox <https://tox.readthedocs.org/en/latest/>`__. For more
- information on running tests, see :ref:`testing`.
+ to use `tox`_. For more information on running tests, see :ref:`testing`.
7. Push to your fork and open a pull request! We’ll be in touch shortly.
8. If you add commits to a pull request, please add a comment or
re-request a review after you push them since GitHub doesn’t
@@ -253,7 +252,7 @@ guidelines to follow:
Editor Settings
---------------
-Personally, I work on beets with `vim <http://www.vim.org/>`__. Here are
+Personally, I work on beets with `vim`_. Here are
some ``.vimrc`` lines that might help with PEP 8-compliant Python
coding::
@@ -318,7 +317,7 @@ To install the test dependencies, run ``python -m pip install .[test]``.
Or, just run a test suite with ``tox`` which will install them
automatically.
-.. _setup.py: https://github.com/beetbox/beets/blob/master/setup.py#L99`
+.. _setup.py: https://github.com/beetbox/beets/blob/master/setup.py
Writing Tests
-------------
@@ -352,9 +351,9 @@ others. See `unittest.mock`_ for more info.
.. _Python unittest: https://docs.python.org/2/library/unittest.html
.. _Codecov: https://codecov.io/github/beetbox/beets
.. _pytest-random: https://github.com/klrmn/pytest-random
-.. _tox: http://tox.readthedocs.org
-.. _detox: https://pypi.python.org/pypi/detox/
-.. _pytest: http://pytest.org
+.. _tox: https://tox.readthedocs.io/en/latest/
+.. _detox: https://pypi.org/project/detox/
+.. _pytest: https://docs.pytest.org/en/stable/
.. _Linux: https://github.com/beetbox/beets/actions
.. _Windows: https://ci.appveyor.com/project/beetbox/beets/
.. _`https://github.com/beetbox/beets/blob/master/setup.py#L99`: https://github.com/beetbox/beets/blob/master/setup.py#L99
@@ -364,3 +363,6 @@ others. See `unittest.mock`_ for more info.
.. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22
.. _unittest.mock: https://docs.python.org/3/library/unittest.mock.html
.. _Python unittest: https://docs.python.org/2/library/unittest.html
+.. _documentation: https://beets.readthedocs.io/en/stable/
+.. _pip: https://pip.pypa.io/en/stable/
+.. _vim: https://www.vim.org/
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 4a87f7f071..b370d117c5 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -189,7 +189,7 @@ Fixes:
* ``beet update`` will now confirm that the user still wants to update if
their library folder cannot be found, preventing the user from accidentally
wiping out their beets database.
- Thanks to :user:`logan-arens`.
+ Thanks to user: `logan-arens`.
:bug:`1934`
* :doc:`/plugins/bpd`: Fix the transition to next track when in consume mode.
Thanks to :user:`aereaux`.
@@ -1262,7 +1262,7 @@ And there are a few bug fixes too:
The last release, 1.3.19, also erroneously reported its version as "1.3.18"
when you typed ``beet version``. This has been corrected.
-.. _six: https://pythonhosted.org/six/
+.. _six: https://pypi.org/project/six/
1.3.19 (June 25, 2016)
@@ -2108,7 +2108,7 @@ As usual, there are loads of little fixes and improvements:
* The :ref:`config-cmd` command can now use ``$EDITOR`` variables with
arguments.
-.. _API changes: https://developer.echonest.com/forums/thread/3650
+.. _API changes: https://web.archive.org/web/20160814092627/https://developer.echonest.com/forums/thread/3650
.. _Plex: https://plex.tv/
.. _musixmatch: https://www.musixmatch.com/
@@ -2333,7 +2333,7 @@ The big new features are:
* A new :ref:`asciify-paths` configuration option replaces all non-ASCII
characters in paths.
-.. _Mutagen: https://bitbucket.org/lazka/mutagen
+.. _Mutagen: https://github.com/quodlibet/mutagen
.. _Spotify: https://www.spotify.com/
And the multitude of little improvements and fixes:
@@ -2588,7 +2588,7 @@ Fixes:
* :doc:`/plugins/convert`: Display a useful error message when the FFmpeg
executable can't be found.
-.. _requests: https://www.python-requests.org/
+.. _requests: https://requests.readthedocs.io/en/master/
1.3.3 (February 26, 2014)
@@ -2769,7 +2769,7 @@ As usual, there are also innumerable little fixes and improvements:
Bezman.
-.. _Acoustic Attributes: http://developer.echonest.com/acoustic-attributes.html
+.. _Acoustic Attributes: https://web.archive.org/web/20160701063109/http://developer.echonest.com/acoustic-attributes.html
.. _MPD: https://www.musicpd.org/
@@ -3119,7 +3119,7 @@ will automatically migrate your configuration to the new system.
header. Thanks to Uwe L. Korn.
* :doc:`/plugins/lastgenre`: Fix an error when using genre canonicalization.
-.. _Tomahawk: https://tomahawk-player.org/
+.. _Tomahawk: https://github.com/tomahawk-player/tomahawk
1.1b3 (March 16, 2013)
----------------------
@@ -3462,7 +3462,7 @@ begins today on features for version 1.1.
* Changed plugin loading so that modules can be imported without
unintentionally loading the plugins they contain.
-.. _The Echo Nest: http://the.echonest.com/
+.. _The Echo Nest: https://web.archive.org/web/20180329103558/http://the.echonest.com/
.. _Tomahawk resolver: https://beets.io/blog/tomahawk-resolver.html
.. _mp3gain: http://mp3gain.sourceforge.net/download.php
.. _aacgain: https://aacgain.altosdesign.com
@@ -3900,7 +3900,7 @@ plugin.
* The :doc:`/plugins/web` encapsulates a simple **Web-based GUI for beets**. The
current iteration can browse the library and play music in browsers that
- support `HTML5 Audio`_.
+ support HTML5 Audio.
* When moving items that are part of an album, the album art implicitly moves
too.
@@ -3917,8 +3917,6 @@ plugin.
* Fix crash when "copying" an art file that's already in place.
-.. _HTML5 Audio: http://www.w3.org/TR/html-markup/audio.html
-
1.0b9 (July 9, 2011)
--------------------
diff --git a/docs/conf.py b/docs/conf.py
index bb3e3d00f6..018ef53974 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,6 +28,13 @@
'stdlib': ('https://docs.python.org/3/library/%s.html', ''),
}
+linkcheck_ignore = [
+ r'https://github.com/beetbox/beets/issues/',
+ r'https://github.com/\w+$', # ignore user pages
+ r'.*localhost.*',
+ r'https://www.musixmatch.com/', # blocks requests
+]
+
# Options for HTML output
htmlhelp_basename = 'beetsdoc'
diff --git a/docs/dev/index.rst b/docs/dev/index.rst
index f1465494d6..63335160c7 100644
--- a/docs/dev/index.rst
+++ b/docs/dev/index.rst
@@ -7,7 +7,7 @@ in hacking beets itself or creating plugins for it.
See also the documentation for `MediaFile`_, the library used by beets to read
and write metadata tags in media files.
-.. _MediaFile: https://mediafile.readthedocs.io/
+.. _MediaFile: https://mediafile.readthedocs.io/en/latest/
.. toctree::
diff --git a/docs/dev/library.rst b/docs/dev/library.rst
index 77e218b939..071b780f3a 100644
--- a/docs/dev/library.rst
+++ b/docs/dev/library.rst
@@ -45,7 +45,7 @@ responsible for handling queries to retrieve stored objects.
.. automethod:: transaction
-.. _SQLite: https://sqlite.org/
+.. _SQLite: https://sqlite.org/index.html
.. _ORM: https://en.wikipedia.org/wiki/Object-relational_mapping
@@ -118,7 +118,7 @@ To make changes to either the database or the tags on a file, you
update an item's fields (e.g., ``item.title = "Let It Be"``) and then call
``item.write()``.
-.. _MediaFile: https://mediafile.readthedocs.io/
+.. _MediaFile: https://mediafile.readthedocs.io/en/latest/
Items also track their modification times (mtimes) to help detect when they
become out of sync with on-disk metadata, mainly to speed up the
diff --git a/docs/dev/plugins.rst b/docs/dev/plugins.rst
index 3328654e07..563775fd64 100644
--- a/docs/dev/plugins.rst
+++ b/docs/dev/plugins.rst
@@ -301,7 +301,7 @@ To access this value, say ``self.config['foo'].get()`` at any point in your
plugin's code. The `self.config` object is a *view* as defined by the `Confuse`_
library.
-.. _Confuse: https://confuse.readthedocs.org/
+.. _Confuse: https://confuse.readthedocs.io/en/latest/
If you want to access configuration values *outside* of your plugin's section,
import the `config` object from the `beets` module. That is, just put ``from
@@ -379,7 +379,7 @@ access to file tags. If you have created a descriptor you can add it through
your plugins ``add_media_field()`` method.
.. automethod:: beets.plugins.BeetsPlugin.add_media_field
-.. _MediaFile: https://mediafile.readthedocs.io/
+.. _MediaFile: https://mediafile.readthedocs.io/en/latest/
Here's an example plugin that provides a meaningless new field "foo"::
diff --git a/docs/faq.rst b/docs/faq.rst
index 9732a47259..eeab6c1ef7 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -2,10 +2,9 @@ FAQ
###
Here are some answers to frequently-asked questions from IRC and elsewhere.
-Got a question that isn't answered here? Try `IRC`_, the `discussion board`_, or
+Got a question that isn't answered here? Try the `discussion board`_, or
:ref:`filing an issue <bugs>` in the bug tracker.
-.. _IRC: irc://irc.freenode.net/beets
.. _mailing list: https://groups.google.com/group/beets-users
.. _discussion board: https://discourse.beets.io
@@ -119,7 +118,7 @@ Run a command like this::
pip install -U beets
-The ``-U`` flag tells `pip <https://pip.pypa.io/>`__ to upgrade
+The ``-U`` flag tells `pip`_ to upgrade
beets to the latest version. If you want a specific version, you can
specify with using ``==`` like so::
@@ -188,7 +187,9 @@ there to report a bug. Please follow these guidelines when reporting an issue:
If you've never reported a bug before, Mozilla has some well-written
`general guidelines for good bug
-reports <https://www.mozilla.org/bugs/>`__.
+reports`_.
+
+.. _general guidelines for good bug reports: https://developer.mozilla.org/en-US/docs/Mozilla/QA/Bug_writing_guidelines
.. _find-config:
@@ -300,8 +301,7 @@ a flag. There is no simple way to remedy this.)
…not change my ID3 tags?
------------------------
-Beets writes `ID3v2.4 <http://www.id3.org/id3v2.4.0-structure>`__ tags by
-default.
+Beets writes `ID3v2.4`_ tags by default.
Some software, including Windows (i.e., Windows Explorer and Windows
Media Player) and `id3lib/id3v2 <http://id3v2.sourceforge.net/>`__,
don't support v2.4 tags. When using 2.4-unaware software, it might look
@@ -311,6 +311,7 @@ To enable ID3v2.3 tags, enable the :ref:`id3v23` config option.
.. _invalid:
+.. _ID3v2.4: https://id3.org/id3v2.4.0-structure
…complain that a file is "unreadable"?
--------------------------------------
@@ -379,3 +380,4 @@ installed using pip, the command ``pip show -f beets`` can show you where
try `this Super User answer`_.
.. _this Super User answer: https://superuser.com/a/284361/4569
+.. _pip: https://pip.pypa.io/en/stable/
diff --git a/docs/guides/main.rst b/docs/guides/main.rst
index 2f05634d98..f1da16f502 100644
--- a/docs/guides/main.rst
+++ b/docs/guides/main.rst
@@ -64,7 +64,7 @@ beets`` if you run into permissions problems).
To install without pip, download beets from `its PyPI page`_ and run ``python
setup.py install`` in the directory therein.
-.. _its PyPI page: https://pypi.org/project/beets#downloads
+.. _its PyPI page: https://pypi.org/project/beets/#files
.. _pip: https://pip.pypa.io
The best way to upgrade beets to a new version is by running ``pip install -U
diff --git a/docs/plugins/absubmit.rst b/docs/plugins/absubmit.rst
index 64c77e0773..953335a143 100644
--- a/docs/plugins/absubmit.rst
+++ b/docs/plugins/absubmit.rst
@@ -62,6 +62,6 @@ file. The available options are:
.. _streaming_extractor_music: https://acousticbrainz.org/download
.. _FAQ: https://acousticbrainz.org/faq
.. _pip: https://pip.pypa.io
-.. _requests: https://docs.python-requests.org/en/master/
+.. _requests: https://requests.readthedocs.io/en/master/
.. _github: https://github.com/MTG/essentia
.. _AcousticBrainz: https://acousticbrainz.org
diff --git a/docs/plugins/beatport.rst b/docs/plugins/beatport.rst
index cbf5b4312c..6117c4a1f1 100644
--- a/docs/plugins/beatport.rst
+++ b/docs/plugins/beatport.rst
@@ -41,6 +41,6 @@ Configuration
This plugin can be configured like other metadata source plugins as described in :ref:`metadata-source-plugin-configuration`.
-.. _requests: https://docs.python-requests.org/en/latest/
+.. _requests: https://requests.readthedocs.io/en/master/
.. _requests_oauthlib: https://github.com/requests/requests-oauthlib
-.. _Beatport: https://beetport.com
+.. _Beatport: https://www.beatport.com/
diff --git a/docs/plugins/bpd.rst b/docs/plugins/bpd.rst
index 49563a73ae..2330bea70a 100644
--- a/docs/plugins/bpd.rst
+++ b/docs/plugins/bpd.rst
@@ -5,7 +5,7 @@ BPD is a music player using music from a beets library. It runs as a daemon and
implements the MPD protocol, so it's compatible with all the great MPD clients
out there. I'm using `Theremin`_, `gmpc`_, `Sonata`_, and `Ario`_ successfully.
-.. _Theremin: https://theremin.sigterm.eu/
+.. _Theremin: https://github.com/TheStalwart/Theremin
.. _gmpc: https://gmpc.wikia.com/wiki/Gnome_Music_Player_Client
.. _Sonata: http://sonata.berlios.de/
.. _Ario: http://ario-player.sourceforge.net/
@@ -13,7 +13,7 @@ out there. I'm using `Theremin`_, `gmpc`_, `Sonata`_, and `Ario`_ successfully.
Dependencies
------------
-Before you can use BPD, you'll need the media library called GStreamer (along
+Before you can use BPD, you'll need the media library called `GStreamer`_ (along
with its Python bindings) on your system.
* On Mac OS X, you can use `Homebrew`_. Run ``brew install gstreamer
@@ -22,14 +22,11 @@ with its Python bindings) on your system.
* On Linux, you need to install GStreamer 1.0 and the GObject bindings for
python. Under Ubuntu, they are called ``python-gi`` and ``gstreamer1.0``.
-* On Windows, you may want to try `GStreamer WinBuilds`_ (caveat emptor: I
- haven't tried this).
-
You will also need the various GStreamer plugin packages to make everything
work. See the :doc:`/plugins/chroma` documentation for more information on
installing GStreamer plugins.
-.. _GStreamer WinBuilds: https://www.gstreamer-winbuild.ylatuya.es/
+.. _GStreamer: https://gstreamer.freedesktop.org/download
.. _Homebrew: https://brew.sh
Usage
diff --git a/docs/plugins/convert.rst b/docs/plugins/convert.rst
index 6e9d00a11b..3bf892e0a4 100644
--- a/docs/plugins/convert.rst
+++ b/docs/plugins/convert.rst
@@ -189,7 +189,7 @@ can use the :doc:`/plugins/replaygain` to do this analysis. See the LAME
`documentation`_ and the `HydrogenAudio wiki`_ for other LAME configuration
options and a thorough discussion of MP3 encoding.
-.. _documentation: http://lame.sourceforge.net/using.php
+.. _documentation: https://lame.sourceforge.io/index.php
.. _HydrogenAudio wiki: https://wiki.hydrogenaud.io/index.php?title=LAME
.. _gapless: https://wiki.hydrogenaud.io/index.php?title=Gapless_playback
-.. _LAME: https://lame.sourceforge.net/
+.. _LAME: https://lame.sourceforge.io/index.php
diff --git a/docs/plugins/embyupdate.rst b/docs/plugins/embyupdate.rst
index 626fafa9df..1a8b7c7b10 100644
--- a/docs/plugins/embyupdate.rst
+++ b/docs/plugins/embyupdate.rst
@@ -18,7 +18,7 @@ To use the ``embyupdate`` plugin you need to install the `requests`_ library wit
With that all in place, you'll see beets send the "update" command to your Emby server every time you change your beets library.
.. _Emby: https://emby.media/
-.. _requests: https://docs.python-requests.org/en/latest/
+.. _requests: https://requests.readthedocs.io/en/master/
Configuration
-------------
diff --git a/docs/plugins/keyfinder.rst b/docs/plugins/keyfinder.rst
index 2ed2c1cec9..a5c64d39c7 100644
--- a/docs/plugins/keyfinder.rst
+++ b/docs/plugins/keyfinder.rst
@@ -31,5 +31,5 @@ configuration file. The available options are:
`initial_key` value.
Default: ``no``.
-.. _KeyFinder: https://www.ibrahimshaath.co.uk/keyfinder/
+.. _KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/
.. _keyfinder-cli: https://github.com/EvanPurkhiser/keyfinder-cli/
diff --git a/docs/plugins/kodiupdate.rst b/docs/plugins/kodiupdate.rst
index e60f503f2a..f521a80004 100644
--- a/docs/plugins/kodiupdate.rst
+++ b/docs/plugins/kodiupdate.rst
@@ -27,7 +27,7 @@ With that all in place, you'll see beets send the "update" command to your Kodi
host every time you change your beets library.
.. _Kodi: https://kodi.tv/
-.. _requests: https://docs.python-requests.org/en/latest/
+.. _requests: https://requests.readthedocs.io/en/master/
Configuration
-------------
diff --git a/docs/plugins/lastgenre.rst b/docs/plugins/lastgenre.rst
index 5fcdd2254a..dee4260de3 100644
--- a/docs/plugins/lastgenre.rst
+++ b/docs/plugins/lastgenre.rst
@@ -1,13 +1,10 @@
LastGenre Plugin
================
-The MusicBrainz database `does not contain genre information`_. Therefore, when
-importing and autotagging music, beets does not assign a genre. The
-``lastgenre`` plugin fetches *tags* from `Last.fm`_ and assigns them as genres
+
+The ``lastgenre`` plugin fetches *tags* from `Last.fm`_ and assigns them as genres
to your albums and items.
-.. _does not contain genre information:
- https://musicbrainz.org/doc/General_FAQ#Why_does_MusicBrainz_not_support_genre_information.3F
.. _Last.fm: https://last.fm/
Installation
@@ -72,7 +69,7 @@ nothing would ever be matched to a more generic node since all the specific
subgenres are in the whitelist to begin with.
-.. _YAML: https://www.yaml.org/
+.. _YAML: https://yaml.org/
.. _tree of nested genre names: https://raw.githubusercontent.com/beetbox/beets/master/beetsplug/lastgenre/genres-tree.yaml
diff --git a/docs/plugins/lyrics.rst b/docs/plugins/lyrics.rst
index fac07ad872..942497a7c9 100644
--- a/docs/plugins/lyrics.rst
+++ b/docs/plugins/lyrics.rst
@@ -26,7 +26,7 @@ already have them. The lyrics will be stored in the beets database. If the
``import.write`` config option is on, then the lyrics will also be written to
the files' tags.
-.. _requests: https://docs.python-requests.org/en/latest/
+.. _requests: https://requests.readthedocs.io/en/master/
Configuration
@@ -180,8 +180,7 @@ You also need to register for a Microsoft Azure Marketplace free account and
to the `Microsoft Translator API`_. Follow the four steps process, specifically
at step 3 enter ``beets`` as *Client ID* and copy/paste the generated
*Client secret* into your ``bing_client_secret`` configuration, alongside
-``bing_lang_to`` target `language code`_.
+``bing_lang_to`` target `language code`.
.. _langdetect: https://pypi.python.org/pypi/langdetect
-.. _Microsoft Translator API: https://www.microsoft.com/en-us/translator/getstarted.aspx
-.. _language code: https://msdn.microsoft.com/en-us/library/hh456380.aspx
+.. _Microsoft Translator API: https://docs.microsoft.com/en-us/azure/cognitive-services/translator/translator-how-to-signup
diff --git a/docs/plugins/plexupdate.rst b/docs/plugins/plexupdate.rst
index 92fc949d25..b6a2bf9207 100644
--- a/docs/plugins/plexupdate.rst
+++ b/docs/plugins/plexupdate.rst
@@ -25,7 +25,7 @@ With that all in place, you'll see beets send the "update" command to your Plex
server every time you change your beets library.
.. _Plex: https://plex.tv/
-.. _requests: https://docs.python-requests.org/en/latest/
+.. _requests: https://requests.readthedocs.io/en/master/
.. _documentation about tokens: https://support.plex.tv/hc/en-us/articles/204059436-Finding-your-account-token-X-Plex-Token
Configuration
diff --git a/docs/plugins/subsonicupdate.rst b/docs/plugins/subsonicupdate.rst
index 3549be091d..710d21f2cd 100644
--- a/docs/plugins/subsonicupdate.rst
+++ b/docs/plugins/subsonicupdate.rst
@@ -4,7 +4,7 @@ SubsonicUpdate Plugin
``subsonicupdate`` is a very simple plugin for beets that lets you automatically
update `Subsonic`_'s index whenever you change your beets library.
-.. _Subsonic: https://www.subsonic.org
+.. _Subsonic: http://www.subsonic.org/pages/index.jsp
To use ``subsonicupdate`` plugin, enable it in your configuration
(see :ref:`using-plugins`).
diff --git a/docs/plugins/web.rst b/docs/plugins/web.rst
index 65d4743fb7..85de48dd43 100644
--- a/docs/plugins/web.rst
+++ b/docs/plugins/web.rst
@@ -19,8 +19,6 @@ The Web interface depends on `Flask`_. To get it, just run ``pip install
flask``. Then enable the ``web`` plugin in your configuration (see
:ref:`using-plugins`).
-.. _Flask: https://flask.pocoo.org/
-
If you need CORS (it's disabled by default---see :ref:`web-cors`, below), then
you also need `flask-cors`_. Just type ``pip install flask-cors``.
@@ -47,9 +45,7 @@ Usage
-----
Type queries into the little search box. Double-click a track to play it with
-`HTML5 Audio`_.
-
-.. _HTML5 Audio: http://www.w3.org/TR/html-markup/audio.html
+HTML5 Audio.
Configuration
-------------
@@ -78,7 +74,7 @@ The Web backend is built using a simple REST+JSON API with the excellent
`Flask`_ library. The frontend is a single-page application written with
`Backbone.js`_. This allows future non-Web clients to use the same backend API.
-.. _Flask: https://flask.pocoo.org/
+
.. _Backbone.js: https://backbonejs.org
Eventually, to make the Web player really viable, we should use a Flash fallback
@@ -90,7 +86,7 @@ for unsupported formats/browsers. There are a number of options for this:
.. _audio.js: https://kolber.github.io/audiojs/
.. _html5media: https://html5media.info/
-.. _MediaElement.js: https://mediaelementjs.com/
+.. _MediaElement.js: https://www.mediaelementjs.com/
.. _web-cors:
@@ -262,3 +258,5 @@ Responds with the number of tracks and albums in the database. ::
"items": 5,
"albums": 3
}
+
+.. _Flask: https://flask.palletsprojects.com/en/1.1.x/
diff --git a/docs/reference/config.rst b/docs/reference/config.rst
index 46f14f2c5d..2f8cee3c9c 100644
--- a/docs/reference/config.rst
+++ b/docs/reference/config.rst
@@ -689,7 +689,7 @@ to one request per second.
.. _your own MusicBrainz database: https://musicbrainz.org/doc/MusicBrainz_Server/Setup
.. _main server: https://musicbrainz.org/
.. _limited: https://musicbrainz.org/doc/XML_Web_Service/Rate_Limiting
-.. _Building search indexes: https://musicbrainz.org/doc/MusicBrainz_Server/Setup#Building_search_indexes
+.. _Building search indexes: https://musicbrainz.org/doc/Development/Search_server_setup
.. _searchlimit:
diff --git a/tox.ini b/tox.ini
index cbf9530334..69308235dd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,6 +27,12 @@ basepython = python2.7
deps = sphinx
commands = sphinx-build -W -q -b html docs {envtmpdir}/html {posargs}
+# checks all links in the docs
+[testenv:links]
+deps = sphinx
+allowlist_externals = /bin/bash
+commands = /bin/bash -c '! sphinx-build -b linkcheck docs {envtmpdir}/linkcheck | grep "broken\s"'
+
[testenv:int]
deps = {[_test]deps}
setenv = INTEGRATION_TEST = 1
|
bookwyrm-social__bookwyrm-878 | Can't create Invites
**Describe the bug**
When creating a new invite, the following appears:

The rest of the page is blank.
It has appeared since the last update I did a few days ago (I don't know at which commit exactly, sorry) and didn't change with the latest one.
**Additional context**
It doesn't matter what I set for Expiry and Use limit.
Also, there's an invite in the list with "Max uses: None", and I'm not sure where it comes from.
| [
{
"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\" add css classes to the forms \"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"cols\"] = None\n visible.field.widget.attrs[\"rows\"] = None\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"manually_approves_followers\",\n \"discoverable\",\n \"preferred_timezone\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass TagForm(CustomForm):\n class Meta:\n model = models.Tag\n fields = [\"name\"]\n help_texts = {f: None for f in fields}\n labels = {\"name\": \"Add a tag\"}\n\n\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: 
None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"subjects\", # TODO\n \"subject_places\", # TODO\n \"connector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\" human-readable exiration time buckets \"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # \"This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\" make sure the email isn't in use by a registered user \"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[\n (i, _(\"%(count)d uses\" % {\"count\": i}))\n for i in [1, 5, 10, 25, 50, 100]\n ]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"statuses\", \"note\"]\n",
"path": "bookwyrm/forms.py"
}
] | [
{
"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\" add css classes to the forms \"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"cols\"] = None\n visible.field.widget.attrs[\"rows\"] = None\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"manually_approves_followers\",\n \"discoverable\",\n \"preferred_timezone\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass TagForm(CustomForm):\n class Meta:\n model = models.Tag\n fields = [\"name\"]\n help_texts = {f: None for f in fields}\n labels = {\"name\": \"Add a tag\"}\n\n\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: 
None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"subjects\", # TODO\n \"subject_places\", # TODO\n \"connector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\" human-readable exiration time buckets \"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # \"This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\" make sure the email isn't in use by a registered user \"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\", \"invitees\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[\n (i, _(\"%(count)d uses\" % {\"count\": i}))\n for i in [1, 5, 10, 25, 50, 100]\n ]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"statuses\", \"note\"]\n",
"path": "bookwyrm/forms.py"
}
] | diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py
index 1a114e05f5..b159a89ef5 100644
--- a/bookwyrm/forms.py
+++ b/bookwyrm/forms.py
@@ -233,7 +233,7 @@ class Meta:
class CreateInviteForm(CustomForm):
class Meta:
model = models.SiteInvite
- exclude = ["code", "user", "times_used"]
+ exclude = ["code", "user", "times_used", "invitees"]
widgets = {
"expiry": ExpiryWidget(
choices=[
|
espnet__espnet-3022 | Error with using compute-fbank-feats.py
Hello! I tried to use the script compute-fbank-feats.py to compute fbank features from a wav file, following its documentation https://espnet.github.io/espnet/apis/utils_py.html#compute-fbank-feats-py
as follows:
```
python3.7 utils/compute-fbank-feats.py scp:wav.scp ark:out.ark
```
but got an error:
```
File "utils/compute-fbank-feats.py", line 134, in <module>
main()
File "utils/compute-fbank-feats.py", line 128, in main
fmax=args.fmax,
File "/home/karina/.local/lib/python3.7/site-packages/espnet/transform/spectrogram.py", line 116, in logmelspectrogram
x_stft, fs=fs, n_mels=n_mels, n_fft=n_fft, fmin=fmin, fmax=fmax, eps=eps
File "/home/karina/.local/lib/python3.7/site-packages/espnet/transform/spectrogram.py", line 74, in stft2logmelspectrogram
fmax = fs / 2 if fmax is None else fmax
TypeError: unsupported operand type(s) for /: 'NoneType' and 'int'
```
wav.scp contains this text:
```
0 test.wav
```
Does anyone have ideas on how to solve this error?
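
The crash happens because `--fs` defaults to `None`, so `fmax = fs / 2` inside `stft2logmelspectrogram` divides `None` by an int. A minimal sketch of the workaround direction, assuming the file's sampling rate is available from the scp reader (the helper name below is made up for illustration; the patch further down applies the same fallback inline when calling `logmelspectrogram`):

```
from typing import Optional


def pick_sampling_rate(requested_fs: Optional[int], file_rate: int) -> int:
    """Hypothetical helper (not part of espnet): prefer an explicit --fs value,
    otherwise fall back to the rate kaldiio read from the wav file, so that
    fmax = fs / 2 never sees None."""
    return requested_fs if requested_fs is not None else file_rate


# --fs omitted on the command line, test.wav assumed to be read at 16 kHz
assert pick_sampling_rate(None, 16000) == 16000
```

Alternatively, passing `--fs` explicitly on the command line (matched to the actual sampling rate of test.wav) sidesteps the `None` entirely.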
| [
{
"content": "#!/usr/bin/env python3\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nfrom distutils.util import strtobool\nimport logging\n\nimport kaldiio\nimport numpy\nimport resampy\n\nfrom espnet.transform.spectrogram import logmelspectrogram\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet.utils.cli_writers import file_writer_helper\nfrom espnet2.utils.types import int_or_none\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=\"compute FBANK feature from WAV\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"--fs\", type=int_or_none, help=\"Sampling frequency\")\n parser.add_argument(\n \"--fmax\", type=int_or_none, default=None, nargs=\"?\", help=\"Maximum frequency\"\n )\n parser.add_argument(\n \"--fmin\", type=int_or_none, default=None, nargs=\"?\", help=\"Minimum frequency\"\n )\n parser.add_argument(\"--n_mels\", type=int, default=80, help=\"Number of mel basis\")\n parser.add_argument(\"--n_fft\", type=int, default=1024, help=\"FFT length in point\")\n parser.add_argument(\n \"--n_shift\", type=int, default=512, help=\"Shift length in point\"\n )\n parser.add_argument(\n \"--win_length\",\n type=int_or_none,\n default=None,\n nargs=\"?\",\n help=\"Analisys window length in point\",\n )\n parser.add_argument(\n \"--window\",\n type=str,\n default=\"hann\",\n choices=[\"hann\", \"hamming\"],\n help=\"Type of window\",\n )\n parser.add_argument(\n \"--write-num-frames\", type=str, help=\"Specify wspecifer for utt2num_frames\"\n )\n parser.add_argument(\n \"--filetype\",\n type=str,\n default=\"mat\",\n choices=[\"mat\", \"hdf5\"],\n help=\"Specify the file format for output. \"\n '\"mat\" is the matrix format in kaldi',\n )\n parser.add_argument(\n \"--compress\", type=strtobool, default=False, help=\"Save in compressed format\"\n )\n parser.add_argument(\n \"--compression-method\",\n type=int,\n default=2,\n help=\"Specify the method(if mat) or \" \"gzip-level(if hdf5)\",\n )\n parser.add_argument(\"--verbose\", \"-V\", default=0, type=int, help=\"Verbose option\")\n parser.add_argument(\n \"--normalize\",\n choices=[1, 16, 24, 32],\n type=int,\n default=None,\n help=\"Give the bit depth of the PCM, \"\n \"then normalizes data to scale in [-1,1]\",\n )\n parser.add_argument(\"rspecifier\", type=str, help=\"WAV scp file\")\n parser.add_argument(\n \"--segments\",\n type=str,\n help=\"segments-file format: each line is either\"\n \"<segment-id> <recording-id> <start-time> <end-time>\"\n \"e.g. 
call-861225-A-0050-0065 call-861225-A 5.0 6.5\",\n )\n parser.add_argument(\"wspecifier\", type=str, help=\"Write specifier\")\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n logfmt = \"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\"\n if args.verbose > 0:\n logging.basicConfig(level=logging.INFO, format=logfmt)\n else:\n logging.basicConfig(level=logging.WARN, format=logfmt)\n logging.info(get_commandline_args())\n\n with kaldiio.ReadHelper(\n args.rspecifier, segments=args.segments\n ) as reader, file_writer_helper(\n args.wspecifier,\n filetype=args.filetype,\n write_num_frames=args.write_num_frames,\n compress=args.compress,\n compression_method=args.compression_method,\n ) as writer:\n for utt_id, (rate, array) in reader:\n array = array.astype(numpy.float32)\n if args.fs is not None and rate != args.fs:\n array = resampy.resample(array, rate, args.fs, axis=0)\n if args.normalize is not None and args.normalize != 1:\n array = array / (1 << (args.normalize - 1))\n\n lmspc = logmelspectrogram(\n x=array,\n fs=args.fs,\n n_mels=args.n_mels,\n n_fft=args.n_fft,\n n_shift=args.n_shift,\n win_length=args.win_length,\n window=args.window,\n fmin=args.fmin,\n fmax=args.fmax,\n )\n writer[utt_id] = lmspc\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "utils/compute-fbank-feats.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nfrom distutils.util import strtobool\nimport logging\n\nimport kaldiio\nimport numpy\nimport resampy\n\nfrom espnet.transform.spectrogram import logmelspectrogram\nfrom espnet.utils.cli_utils import get_commandline_args\nfrom espnet.utils.cli_writers import file_writer_helper\nfrom espnet2.utils.types import int_or_none\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description=\"compute FBANK feature from WAV\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"--fs\", type=int_or_none, help=\"Sampling frequency\")\n parser.add_argument(\n \"--fmax\", type=int_or_none, default=None, nargs=\"?\", help=\"Maximum frequency\"\n )\n parser.add_argument(\n \"--fmin\", type=int_or_none, default=None, nargs=\"?\", help=\"Minimum frequency\"\n )\n parser.add_argument(\"--n_mels\", type=int, default=80, help=\"Number of mel basis\")\n parser.add_argument(\"--n_fft\", type=int, default=1024, help=\"FFT length in point\")\n parser.add_argument(\n \"--n_shift\", type=int, default=512, help=\"Shift length in point\"\n )\n parser.add_argument(\n \"--win_length\",\n type=int_or_none,\n default=None,\n nargs=\"?\",\n help=\"Analisys window length in point\",\n )\n parser.add_argument(\n \"--window\",\n type=str,\n default=\"hann\",\n choices=[\"hann\", \"hamming\"],\n help=\"Type of window\",\n )\n parser.add_argument(\n \"--write-num-frames\", type=str, help=\"Specify wspecifer for utt2num_frames\"\n )\n parser.add_argument(\n \"--filetype\",\n type=str,\n default=\"mat\",\n choices=[\"mat\", \"hdf5\"],\n help=\"Specify the file format for output. \"\n '\"mat\" is the matrix format in kaldi',\n )\n parser.add_argument(\n \"--compress\", type=strtobool, default=False, help=\"Save in compressed format\"\n )\n parser.add_argument(\n \"--compression-method\",\n type=int,\n default=2,\n help=\"Specify the method(if mat) or \" \"gzip-level(if hdf5)\",\n )\n parser.add_argument(\"--verbose\", \"-V\", default=0, type=int, help=\"Verbose option\")\n parser.add_argument(\n \"--normalize\",\n choices=[1, 16, 24, 32],\n type=int,\n default=None,\n help=\"Give the bit depth of the PCM, \"\n \"then normalizes data to scale in [-1,1]\",\n )\n parser.add_argument(\"rspecifier\", type=str, help=\"WAV scp file\")\n parser.add_argument(\n \"--segments\",\n type=str,\n help=\"segments-file format: each line is either\"\n \"<segment-id> <recording-id> <start-time> <end-time>\"\n \"e.g. 
call-861225-A-0050-0065 call-861225-A 5.0 6.5\",\n )\n parser.add_argument(\"wspecifier\", type=str, help=\"Write specifier\")\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n logfmt = \"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\"\n if args.verbose > 0:\n logging.basicConfig(level=logging.INFO, format=logfmt)\n else:\n logging.basicConfig(level=logging.WARN, format=logfmt)\n logging.info(get_commandline_args())\n\n with kaldiio.ReadHelper(\n args.rspecifier, segments=args.segments\n ) as reader, file_writer_helper(\n args.wspecifier,\n filetype=args.filetype,\n write_num_frames=args.write_num_frames,\n compress=args.compress,\n compression_method=args.compression_method,\n ) as writer:\n for utt_id, (rate, array) in reader:\n array = array.astype(numpy.float32)\n if args.fs is not None and rate != args.fs:\n array = resampy.resample(array, rate, args.fs, axis=0)\n if args.normalize is not None and args.normalize != 1:\n array = array / (1 << (args.normalize - 1))\n\n lmspc = logmelspectrogram(\n x=array,\n fs=args.fs if args.fs is not None else rate,\n n_mels=args.n_mels,\n n_fft=args.n_fft,\n n_shift=args.n_shift,\n win_length=args.win_length,\n window=args.window,\n fmin=args.fmin,\n fmax=args.fmax,\n )\n writer[utt_id] = lmspc\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "utils/compute-fbank-feats.py"
}
] | diff --git a/utils/compute-fbank-feats.py b/utils/compute-fbank-feats.py
index 72feb1800c6..d5defc7d899 100755
--- a/utils/compute-fbank-feats.py
+++ b/utils/compute-fbank-feats.py
@@ -118,7 +118,7 @@ def main():
lmspc = logmelspectrogram(
x=array,
- fs=args.fs,
+ fs=args.fs if args.fs is not None else rate,
n_mels=args.n_mels,
n_fft=args.n_fft,
n_shift=args.n_shift,
|
Textualize__rich-2216 | Missing `f` prefix on f-strings
Some strings look like they're meant to be f-strings but are missing the `f` prefix, meaning variable interpolation won't happen.
https://github.com/Textualize/rich/blob/c979a1b16f27285b03fdb14f5e364ea36d7eba01/rich/pretty.py#L369
I found this issue automatically. I'm a bot. Beep Boop 🦊. See other issues I found in your repo [here](https://codereview.doctor/Textualize/rich)
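
A self-contained illustration of the symptom (the variable name is taken from the affected line, but the snippet itself is illustrative, not rich code): without the `f` prefix, the braces and `!r` conversion are kept literally instead of interpolating the value.

```
typecode = "i"

plain = "array({typecode!r})"   # no f prefix: braces stay literal
fixed = f"array({typecode!r})"  # f-string: interpolates repr(typecode)

print(plain)  # array({typecode!r})
print(fixed)  # array('i')
```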
| [
{
"content": "import builtins\nimport collections\nimport dataclasses\nimport inspect\nimport os\nimport sys\nfrom array import array\nfrom collections import Counter, UserDict, UserList, defaultdict, deque\nfrom dataclasses import dataclass, fields, is_dataclass\nfrom inspect import isclass\nfrom itertools import islice\nfrom types import MappingProxyType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Dict,\n Iterable,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\n\nfrom rich.repr import RichReprResult\n\ntry:\n import attr as _attr_module\n\n _has_attrs = True\nexcept ImportError: # pragma: no cover\n _has_attrs = False\n\nfrom . import get_console\nfrom ._loop import loop_last\nfrom ._pick import pick_bool\nfrom .abc import RichRenderable\nfrom .cells import cell_len\nfrom .highlighter import ReprHighlighter\nfrom .jupyter import JupyterMixin, JupyterRenderable\nfrom .measure import Measurement\nfrom .text import Text\n\nif TYPE_CHECKING:\n from .console import (\n Console,\n ConsoleOptions,\n HighlighterType,\n JustifyMethod,\n OverflowMethod,\n RenderResult,\n )\n\n\ndef _is_attr_object(obj: Any) -> bool:\n \"\"\"Check if an object was created with attrs module.\"\"\"\n return _has_attrs and _attr_module.has(type(obj))\n\n\ndef _get_attr_fields(obj: Any) -> Sequence[\"_attr_module.Attribute[Any]\"]:\n \"\"\"Get fields for an attrs object.\"\"\"\n return _attr_module.fields(type(obj)) if _has_attrs else []\n\n\ndef _is_dataclass_repr(obj: object) -> bool:\n \"\"\"Check if an instance of a dataclass contains the default repr.\n\n Args:\n obj (object): A dataclass instance.\n\n Returns:\n bool: True if the default repr is used, False if there is a custom repr.\n \"\"\"\n # Digging in to a lot of internals here\n # Catching all exceptions in case something is missing on a non CPython implementation\n try:\n return obj.__repr__.__code__.co_filename == dataclasses.__file__\n except Exception: # pragma: no coverage\n return False\n\n\n_dummy_namedtuple = collections.namedtuple(\"_dummy_namedtuple\", [])\n\n\ndef _has_default_namedtuple_repr(obj: object) -> bool:\n \"\"\"Check if an instance of namedtuple contains the default repr\n\n Args:\n obj (object): A namedtuple\n\n Returns:\n bool: True if the default repr is used, False if there's a custom repr.\n \"\"\"\n obj_file = None\n try:\n obj_file = inspect.getfile(obj.__repr__)\n except (OSError, TypeError):\n # OSError handles case where object is defined in __main__ scope, e.g. 
REPL - no filename available.\n # TypeError trapped defensively, in case of object without filename slips through.\n pass\n default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)\n return obj_file == default_repr_file\n\n\ndef _ipy_display_hook(\n value: Any,\n console: Optional[\"Console\"] = None,\n overflow: \"OverflowMethod\" = \"ignore\",\n crop: bool = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n from .console import ConsoleRenderable # needed here to prevent circular import\n\n # always skip rich generated jupyter renderables or None values\n if _safe_isinstance(value, JupyterRenderable) or value is None:\n return\n\n console = console or get_console()\n if console.is_jupyter:\n # Delegate rendering to IPython if the object (and IPython) supports it\n # https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display\n ipython_repr_methods = [\n \"_repr_html_\",\n \"_repr_markdown_\",\n \"_repr_json_\",\n \"_repr_latex_\",\n \"_repr_jpeg_\",\n \"_repr_png_\",\n \"_repr_svg_\",\n \"_repr_mimebundle_\",\n ]\n for repr_method in ipython_repr_methods:\n method = getattr(value, repr_method, None)\n if inspect.ismethod(method):\n # Calling the method ourselves isn't ideal. The interface for the `_repr_*_` methods\n # specifies that if they return None, then they should not be rendered\n # by the notebook.\n try:\n repr_result = method()\n except Exception:\n continue # If the method raises, treat it as if it doesn't exist, try any others\n if repr_result is not None:\n return # Delegate rendering to IPython\n\n # certain renderables should start on a new line\n if _safe_isinstance(value, ConsoleRenderable):\n console.line()\n\n console.print(\n value\n if _safe_isinstance(value, RichRenderable)\n else Pretty(\n value,\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n margin=12,\n ),\n crop=crop,\n new_line_start=True,\n )\n\n\ndef _safe_isinstance(\n obj: object, class_or_tuple: Union[type, Tuple[type, ...]]\n) -> bool:\n \"\"\"isinstance can fail in rare cases, for example types with no __class__\"\"\"\n try:\n return isinstance(obj, class_or_tuple)\n except Exception:\n return False\n\n\ndef install(\n console: Optional[\"Console\"] = None,\n overflow: \"OverflowMethod\" = \"ignore\",\n crop: bool = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n \"\"\"Install automatic pretty printing in the Python REPL.\n\n Args:\n console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.\n overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to \"ignore\".\n crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.\n indent_guides (bool, optional): Enable indentation guides. Defaults to False.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.\n expand_all (bool, optional): Expand all containers. Defaults to False.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. 
Defaults to 100.\n \"\"\"\n from rich import get_console\n\n console = console or get_console()\n assert console is not None\n\n def display_hook(value: Any) -> None:\n \"\"\"Replacement sys.displayhook which prettifies objects with Rich.\"\"\"\n if value is not None:\n assert console is not None\n builtins._ = None # type: ignore[attr-defined]\n console.print(\n value\n if _safe_isinstance(value, RichRenderable)\n else Pretty(\n value,\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n ),\n crop=crop,\n )\n builtins._ = value # type: ignore[attr-defined]\n\n try: # pragma: no cover\n ip = get_ipython() # type: ignore[name-defined]\n from IPython.core.formatters import BaseFormatter\n\n class RichFormatter(BaseFormatter): # type: ignore[misc]\n pprint: bool = True\n\n def __call__(self, value: Any) -> Any:\n if self.pprint:\n return _ipy_display_hook(\n value,\n console=get_console(),\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n )\n else:\n return repr(value)\n\n # replace plain text formatter with rich formatter\n rich_formatter = RichFormatter()\n ip.display_formatter.formatters[\"text/plain\"] = rich_formatter\n except Exception:\n sys.displayhook = display_hook\n\n\nclass Pretty(JupyterMixin):\n \"\"\"A rich renderable that pretty prints an object.\n\n Args:\n _object (Any): An object to pretty print.\n highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.\n indent_size (int, optional): Number of spaces in indent. Defaults to 4.\n justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.\n overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.\n no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.\n indent_guides (bool, optional): Enable indentation guides. Defaults to False.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.\n max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.\n expand_all (bool, optional): Expand all containers. Defaults to False.\n margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.\n insert_line (bool, optional): Insert a new line if the output has multiple new lines. 
Defaults to False.\n \"\"\"\n\n def __init__(\n self,\n _object: Any,\n highlighter: Optional[\"HighlighterType\"] = None,\n *,\n indent_size: int = 4,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n margin: int = 0,\n insert_line: bool = False,\n ) -> None:\n self._object = _object\n self.highlighter = highlighter or ReprHighlighter()\n self.indent_size = indent_size\n self.justify: Optional[\"JustifyMethod\"] = justify\n self.overflow: Optional[\"OverflowMethod\"] = overflow\n self.no_wrap = no_wrap\n self.indent_guides = indent_guides\n self.max_length = max_length\n self.max_string = max_string\n self.max_depth = max_depth\n self.expand_all = expand_all\n self.margin = margin\n self.insert_line = insert_line\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"RenderResult\":\n pretty_str = pretty_repr(\n self._object,\n max_width=options.max_width - self.margin,\n indent_size=self.indent_size,\n max_length=self.max_length,\n max_string=self.max_string,\n max_depth=self.max_depth,\n expand_all=self.expand_all,\n )\n pretty_text = Text(\n pretty_str,\n justify=self.justify or options.justify,\n overflow=self.overflow or options.overflow,\n no_wrap=pick_bool(self.no_wrap, options.no_wrap),\n style=\"pretty\",\n )\n pretty_text = (\n self.highlighter(pretty_text)\n if pretty_text\n else Text(\n f\"{type(self._object)}.__repr__ returned empty string\",\n style=\"dim italic\",\n )\n )\n if self.indent_guides and not options.ascii_only:\n pretty_text = pretty_text.with_indent_guides(\n self.indent_size, style=\"repr.indent\"\n )\n if self.insert_line and \"\\n\" in pretty_text:\n yield \"\"\n yield pretty_text\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"Measurement\":\n pretty_str = pretty_repr(\n self._object,\n max_width=options.max_width,\n indent_size=self.indent_size,\n max_length=self.max_length,\n max_string=self.max_string,\n expand_all=self.expand_all,\n )\n text_width = (\n max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0\n )\n return Measurement(text_width, text_width)\n\n\ndef _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:\n return (\n f\"defaultdict({_object.default_factory!r}, {{\",\n \"})\",\n f\"defaultdict({_object.default_factory!r}, {{}})\",\n )\n\n\ndef _get_braces_for_array(_object: \"array[Any]\") -> Tuple[str, str, str]:\n return (f\"array({_object.typecode!r}, [\", \"])\", \"array({_object.typecode!r})\")\n\n\n_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {\n os._Environ: lambda _object: (\"environ({\", \"})\", \"environ({})\"),\n array: _get_braces_for_array,\n defaultdict: _get_braces_for_defaultdict,\n Counter: lambda _object: (\"Counter({\", \"})\", \"Counter()\"),\n deque: lambda _object: (\"deque([\", \"])\", \"deque()\"),\n dict: lambda _object: (\"{\", \"}\", \"{}\"),\n UserDict: lambda _object: (\"{\", \"}\", \"{}\"),\n frozenset: lambda _object: (\"frozenset({\", \"})\", \"frozenset()\"),\n list: lambda _object: (\"[\", \"]\", \"[]\"),\n UserList: lambda _object: (\"[\", \"]\", \"[]\"),\n set: lambda _object: (\"{\", \"}\", \"set()\"),\n tuple: lambda _object: (\"(\", \")\", \"()\"),\n MappingProxyType: lambda _object: (\"mappingproxy({\", 
\"})\", \"mappingproxy({})\"),\n}\n_CONTAINERS = tuple(_BRACES.keys())\n_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)\n\n\ndef is_expandable(obj: Any) -> bool:\n \"\"\"Check if an object may be expanded by pretty print.\"\"\"\n return (\n _safe_isinstance(obj, _CONTAINERS)\n or (is_dataclass(obj))\n or (hasattr(obj, \"__rich_repr__\"))\n or _is_attr_object(obj)\n ) and not isclass(obj)\n\n\n@dataclass\nclass Node:\n \"\"\"A node in a repr tree. May be atomic or a container.\"\"\"\n\n key_repr: str = \"\"\n value_repr: str = \"\"\n open_brace: str = \"\"\n close_brace: str = \"\"\n empty: str = \"\"\n last: bool = False\n is_tuple: bool = False\n is_namedtuple: bool = False\n children: Optional[List[\"Node\"]] = None\n key_separator = \": \"\n separator: str = \", \"\n\n def iter_tokens(self) -> Iterable[str]:\n \"\"\"Generate tokens for this node.\"\"\"\n if self.key_repr:\n yield self.key_repr\n yield self.key_separator\n if self.value_repr:\n yield self.value_repr\n elif self.children is not None:\n if self.children:\n yield self.open_brace\n if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:\n yield from self.children[0].iter_tokens()\n yield \",\"\n else:\n for child in self.children:\n yield from child.iter_tokens()\n if not child.last:\n yield self.separator\n yield self.close_brace\n else:\n yield self.empty\n\n def check_length(self, start_length: int, max_length: int) -> bool:\n \"\"\"Check the length fits within a limit.\n\n Args:\n start_length (int): Starting length of the line (indent, prefix, suffix).\n max_length (int): Maximum length.\n\n Returns:\n bool: True if the node can be rendered within max length, otherwise False.\n \"\"\"\n total_length = start_length\n for token in self.iter_tokens():\n total_length += cell_len(token)\n if total_length > max_length:\n return False\n return True\n\n def __str__(self) -> str:\n repr_text = \"\".join(self.iter_tokens())\n return repr_text\n\n def render(\n self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False\n ) -> str:\n \"\"\"Render the node to a pretty repr.\n\n Args:\n max_width (int, optional): Maximum width of the repr. Defaults to 80.\n indent_size (int, optional): Size of indents. Defaults to 4.\n expand_all (bool, optional): Expand all levels. 
Defaults to False.\n\n Returns:\n str: A repr string of the original object.\n \"\"\"\n lines = [_Line(node=self, is_root=True)]\n line_no = 0\n while line_no < len(lines):\n line = lines[line_no]\n if line.expandable and not line.expanded:\n if expand_all or not line.check_length(max_width):\n lines[line_no : line_no + 1] = line.expand(indent_size)\n line_no += 1\n\n repr_str = \"\\n\".join(str(line) for line in lines)\n return repr_str\n\n\n@dataclass\nclass _Line:\n \"\"\"A line in repr output.\"\"\"\n\n parent: Optional[\"_Line\"] = None\n is_root: bool = False\n node: Optional[Node] = None\n text: str = \"\"\n suffix: str = \"\"\n whitespace: str = \"\"\n expanded: bool = False\n last: bool = False\n\n @property\n def expandable(self) -> bool:\n \"\"\"Check if the line may be expanded.\"\"\"\n return bool(self.node is not None and self.node.children)\n\n def check_length(self, max_length: int) -> bool:\n \"\"\"Check this line fits within a given number of cells.\"\"\"\n start_length = (\n len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)\n )\n assert self.node is not None\n return self.node.check_length(start_length, max_length)\n\n def expand(self, indent_size: int) -> Iterable[\"_Line\"]:\n \"\"\"Expand this line by adding children on their own line.\"\"\"\n node = self.node\n assert node is not None\n whitespace = self.whitespace\n assert node.children\n if node.key_repr:\n new_line = yield _Line(\n text=f\"{node.key_repr}{node.key_separator}{node.open_brace}\",\n whitespace=whitespace,\n )\n else:\n new_line = yield _Line(text=node.open_brace, whitespace=whitespace)\n child_whitespace = self.whitespace + \" \" * indent_size\n tuple_of_one = node.is_tuple and len(node.children) == 1\n for last, child in loop_last(node.children):\n separator = \",\" if tuple_of_one else node.separator\n line = _Line(\n parent=new_line,\n node=child,\n whitespace=child_whitespace,\n suffix=separator,\n last=last and not tuple_of_one,\n )\n yield line\n\n yield _Line(\n text=node.close_brace,\n whitespace=whitespace,\n suffix=self.suffix,\n last=self.last,\n )\n\n def __str__(self) -> str:\n if self.last:\n return f\"{self.whitespace}{self.text}{self.node or ''}\"\n else:\n return (\n f\"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}\"\n )\n\n\ndef _is_namedtuple(obj: Any) -> bool:\n \"\"\"Checks if an object is most likely a namedtuple. It is possible\n to craft an object that passes this check and isn't a namedtuple, but\n there is only a minuscule chance of this happening unintentionally.\n\n Args:\n obj (Any): The object to test\n\n Returns:\n bool: True if the object is a namedtuple. 
False otherwise.\n \"\"\"\n try:\n fields = getattr(obj, \"_fields\", None)\n except Exception:\n # Being very defensive - if we cannot get the attr then its not a namedtuple\n return False\n return isinstance(obj, tuple) and isinstance(fields, tuple)\n\n\ndef traverse(\n _object: Any,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n) -> Node:\n \"\"\"Traverse object and generate a tree.\n\n Args:\n _object (Any): Object to be traversed.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.\n Defaults to None.\n max_depth (int, optional): Maximum depth of data structures, or None for no maximum.\n Defaults to None.\n\n Returns:\n Node: The root of a tree structure which can be used to render a pretty repr.\n \"\"\"\n\n def to_repr(obj: Any) -> str:\n \"\"\"Get repr string for an object, but catch errors.\"\"\"\n if (\n max_string is not None\n and _safe_isinstance(obj, (bytes, str))\n and len(obj) > max_string\n ):\n truncated = len(obj) - max_string\n obj_repr = f\"{obj[:max_string]!r}+{truncated}\"\n else:\n try:\n obj_repr = repr(obj)\n except Exception as error:\n obj_repr = f\"<repr-error {str(error)!r}>\"\n return obj_repr\n\n visited_ids: Set[int] = set()\n push_visited = visited_ids.add\n pop_visited = visited_ids.remove\n\n def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:\n \"\"\"Walk the object depth first.\"\"\"\n\n obj_type = type(obj)\n py_version = (sys.version_info.major, sys.version_info.minor)\n children: List[Node]\n reached_max_depth = max_depth is not None and depth >= max_depth\n\n def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:\n for arg in rich_args:\n if _safe_isinstance(arg, tuple):\n if len(arg) == 3:\n key, child, default = arg\n if default == child:\n continue\n yield key, child\n elif len(arg) == 2:\n key, child = arg\n yield key, child\n elif len(arg) == 1:\n yield arg[0]\n else:\n yield arg\n\n try:\n fake_attributes = hasattr(\n obj, \"awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492\"\n )\n except Exception:\n fake_attributes = False\n\n rich_repr_result: Optional[RichReprResult] = None\n if not fake_attributes:\n try:\n if hasattr(obj, \"__rich_repr__\") and not isclass(obj):\n rich_repr_result = obj.__rich_repr__()\n except Exception:\n pass\n\n if rich_repr_result is not None:\n angular = getattr(obj.__rich_repr__, \"angular\", False)\n args = list(iter_rich_args(rich_repr_result))\n class_name = obj.__class__.__name__\n\n if args:\n children = []\n append = children.append\n\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n if angular:\n node = Node(\n open_brace=f\"<{class_name} \",\n close_brace=\">\",\n children=children,\n last=root,\n separator=\" \",\n )\n else:\n node = Node(\n open_brace=f\"{class_name}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n for last, arg in loop_last(args):\n if _safe_isinstance(arg, tuple):\n key, child = arg\n child_node = _traverse(child, depth=depth + 1)\n child_node.last = last\n child_node.key_repr = key\n child_node.key_separator = \"=\"\n append(child_node)\n else:\n child_node = _traverse(arg, depth=depth + 1)\n child_node.last = last\n append(child_node)\n else:\n node = Node(\n value_repr=f\"<{class_name}>\" if angular else f\"{class_name}()\",\n children=[],\n last=root,\n )\n elif 
_is_attr_object(obj) and not fake_attributes:\n children = []\n append = children.append\n\n attr_fields = _get_attr_fields(obj)\n if attr_fields:\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n node = Node(\n open_brace=f\"{obj.__class__.__name__}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n\n def iter_attrs() -> Iterable[\n Tuple[str, Any, Optional[Callable[[Any], str]]]\n ]:\n \"\"\"Iterate over attr fields and values.\"\"\"\n for attr in attr_fields:\n if attr.repr:\n try:\n value = getattr(obj, attr.name)\n except Exception as error:\n # Can happen, albeit rarely\n yield (attr.name, error, None)\n else:\n yield (\n attr.name,\n value,\n attr.repr if callable(attr.repr) else None,\n )\n\n for last, (name, value, repr_callable) in loop_last(iter_attrs()):\n if repr_callable:\n child_node = Node(value_repr=str(repr_callable(value)))\n else:\n child_node = _traverse(value, depth=depth + 1)\n child_node.last = last\n child_node.key_repr = name\n child_node.key_separator = \"=\"\n append(child_node)\n else:\n node = Node(\n value_repr=f\"{obj.__class__.__name__}()\", children=[], last=root\n )\n\n elif (\n is_dataclass(obj)\n and not _safe_isinstance(obj, type)\n and not fake_attributes\n and (_is_dataclass_repr(obj) or py_version == (3, 6))\n ):\n obj_id = id(obj)\n if obj_id in visited_ids:\n # Recursion detected\n return Node(value_repr=\"...\")\n push_visited(obj_id)\n\n children = []\n append = children.append\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n node = Node(\n open_brace=f\"{obj.__class__.__name__}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n\n for last, field in loop_last(\n field for field in fields(obj) if field.repr\n ):\n child_node = _traverse(getattr(obj, field.name), depth=depth + 1)\n child_node.key_repr = field.name\n child_node.last = last\n child_node.key_separator = \"=\"\n append(child_node)\n\n pop_visited(obj_id)\n elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):\n if reached_max_depth:\n node = Node(value_repr=\"...\")\n else:\n children = []\n class_name = obj.__class__.__name__\n node = Node(\n open_brace=f\"{class_name}(\",\n close_brace=\")\",\n children=children,\n empty=f\"{class_name}()\",\n )\n append = children.append\n for last, (key, value) in loop_last(obj._asdict().items()):\n child_node = _traverse(value, depth=depth + 1)\n child_node.key_repr = key\n child_node.last = last\n child_node.key_separator = \"=\"\n append(child_node)\n elif _safe_isinstance(obj, _CONTAINERS):\n for container_type in _CONTAINERS:\n if _safe_isinstance(obj, container_type):\n obj_type = container_type\n break\n\n obj_id = id(obj)\n if obj_id in visited_ids:\n # Recursion detected\n return Node(value_repr=\"...\")\n push_visited(obj_id)\n\n open_brace, close_brace, empty = _BRACES[obj_type](obj)\n\n if reached_max_depth:\n node = Node(value_repr=f\"...\", last=root)\n elif obj_type.__repr__ != type(obj).__repr__:\n node = Node(value_repr=to_repr(obj), last=root)\n elif obj:\n children = []\n node = Node(\n open_brace=open_brace,\n close_brace=close_brace,\n children=children,\n last=root,\n )\n append = children.append\n num_items = len(obj)\n last_item_index = num_items - 1\n\n if _safe_isinstance(obj, _MAPPING_CONTAINERS):\n iter_items = iter(obj.items())\n if max_length is not None:\n iter_items = islice(iter_items, max_length)\n for index, (key, child) in enumerate(iter_items):\n child_node = _traverse(child, depth=depth + 1)\n child_node.key_repr = to_repr(key)\n 
child_node.last = index == last_item_index\n append(child_node)\n else:\n iter_values = iter(obj)\n if max_length is not None:\n iter_values = islice(iter_values, max_length)\n for index, child in enumerate(iter_values):\n child_node = _traverse(child, depth=depth + 1)\n child_node.last = index == last_item_index\n append(child_node)\n if max_length is not None and num_items > max_length:\n append(Node(value_repr=f\"... +{num_items - max_length}\", last=True))\n else:\n node = Node(empty=empty, children=[], last=root)\n\n pop_visited(obj_id)\n else:\n node = Node(value_repr=to_repr(obj), last=root)\n node.is_tuple = _safe_isinstance(obj, tuple)\n node.is_namedtuple = _is_namedtuple(obj)\n return node\n\n node = _traverse(_object, root=True)\n return node\n\n\ndef pretty_repr(\n _object: Any,\n *,\n max_width: int = 80,\n indent_size: int = 4,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n) -> str:\n \"\"\"Prettify repr string by expanding on to new lines to fit within a given width.\n\n Args:\n _object (Any): Object to repr.\n max_width (int, optional): Desired maximum width of repr string. Defaults to 80.\n indent_size (int, optional): Number of spaces to indent. Defaults to 4.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.\n Defaults to None.\n max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.\n Defaults to None.\n expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.\n\n Returns:\n str: A possibly multi-line representation of the object.\n \"\"\"\n\n if _safe_isinstance(_object, Node):\n node = _object\n else:\n node = traverse(\n _object, max_length=max_length, max_string=max_string, max_depth=max_depth\n )\n repr_str: str = node.render(\n max_width=max_width, indent_size=indent_size, expand_all=expand_all\n )\n return repr_str\n\n\ndef pprint(\n _object: Any,\n *,\n console: Optional[\"Console\"] = None,\n indent_guides: bool = True,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n \"\"\"A convenience function for pretty printing.\n\n Args:\n _object (Any): Object to pretty print.\n console (Console, optional): Console instance, or None to use default. Defaults to None.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.\n max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.\n indent_guides (bool, optional): Enable indentation guides. Defaults to True.\n expand_all (bool, optional): Expand all containers. 
Defaults to False.\n \"\"\"\n _console = get_console() if console is None else console\n _console.print(\n Pretty(\n _object,\n max_length=max_length,\n max_string=max_string,\n max_depth=max_depth,\n indent_guides=indent_guides,\n expand_all=expand_all,\n overflow=\"ignore\",\n ),\n soft_wrap=True,\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n class BrokenRepr:\n def __repr__(self) -> str:\n 1 / 0\n return \"this will fail\"\n\n from typing import NamedTuple\n\n class StockKeepingUnit(NamedTuple):\n name: str\n description: str\n price: float\n category: str\n reviews: List[str]\n\n d = defaultdict(int)\n d[\"foo\"] = 5\n data = {\n \"foo\": [\n 1,\n \"Hello World!\",\n 100.123,\n 323.232,\n 432324.0,\n {5, 6, 7, (1, 2, 3, 4), 8},\n ],\n \"bar\": frozenset({1, 2, 3}),\n \"defaultdict\": defaultdict(\n list, {\"crumble\": [\"apple\", \"rhubarb\", \"butter\", \"sugar\", \"flour\"]}\n ),\n \"counter\": Counter(\n [\n \"apple\",\n \"orange\",\n \"pear\",\n \"kumquat\",\n \"kumquat\",\n \"durian\" * 100,\n ]\n ),\n \"atomic\": (False, True, None),\n \"namedtuple\": StockKeepingUnit(\n \"Sparkling British Spring Water\",\n \"Carbonated spring water\",\n 0.9,\n \"water\",\n [\"its amazing!\", \"its terrible!\"],\n ),\n \"Broken\": BrokenRepr(),\n }\n data[\"foo\"].append(data) # type: ignore[attr-defined]\n\n from rich import print\n\n print(Pretty(data, indent_guides=True, max_string=20))\n",
"path": "rich/pretty.py"
}
] | [
{
"content": "import builtins\nimport collections\nimport dataclasses\nimport inspect\nimport os\nimport sys\nfrom array import array\nfrom collections import Counter, UserDict, UserList, defaultdict, deque\nfrom dataclasses import dataclass, fields, is_dataclass\nfrom inspect import isclass\nfrom itertools import islice\nfrom types import MappingProxyType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Dict,\n Iterable,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\n\nfrom rich.repr import RichReprResult\n\ntry:\n import attr as _attr_module\n\n _has_attrs = True\nexcept ImportError: # pragma: no cover\n _has_attrs = False\n\nfrom . import get_console\nfrom ._loop import loop_last\nfrom ._pick import pick_bool\nfrom .abc import RichRenderable\nfrom .cells import cell_len\nfrom .highlighter import ReprHighlighter\nfrom .jupyter import JupyterMixin, JupyterRenderable\nfrom .measure import Measurement\nfrom .text import Text\n\nif TYPE_CHECKING:\n from .console import (\n Console,\n ConsoleOptions,\n HighlighterType,\n JustifyMethod,\n OverflowMethod,\n RenderResult,\n )\n\n\ndef _is_attr_object(obj: Any) -> bool:\n \"\"\"Check if an object was created with attrs module.\"\"\"\n return _has_attrs and _attr_module.has(type(obj))\n\n\ndef _get_attr_fields(obj: Any) -> Sequence[\"_attr_module.Attribute[Any]\"]:\n \"\"\"Get fields for an attrs object.\"\"\"\n return _attr_module.fields(type(obj)) if _has_attrs else []\n\n\ndef _is_dataclass_repr(obj: object) -> bool:\n \"\"\"Check if an instance of a dataclass contains the default repr.\n\n Args:\n obj (object): A dataclass instance.\n\n Returns:\n bool: True if the default repr is used, False if there is a custom repr.\n \"\"\"\n # Digging in to a lot of internals here\n # Catching all exceptions in case something is missing on a non CPython implementation\n try:\n return obj.__repr__.__code__.co_filename == dataclasses.__file__\n except Exception: # pragma: no coverage\n return False\n\n\n_dummy_namedtuple = collections.namedtuple(\"_dummy_namedtuple\", [])\n\n\ndef _has_default_namedtuple_repr(obj: object) -> bool:\n \"\"\"Check if an instance of namedtuple contains the default repr\n\n Args:\n obj (object): A namedtuple\n\n Returns:\n bool: True if the default repr is used, False if there's a custom repr.\n \"\"\"\n obj_file = None\n try:\n obj_file = inspect.getfile(obj.__repr__)\n except (OSError, TypeError):\n # OSError handles case where object is defined in __main__ scope, e.g. 
REPL - no filename available.\n # TypeError trapped defensively, in case of object without filename slips through.\n pass\n default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)\n return obj_file == default_repr_file\n\n\ndef _ipy_display_hook(\n value: Any,\n console: Optional[\"Console\"] = None,\n overflow: \"OverflowMethod\" = \"ignore\",\n crop: bool = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n from .console import ConsoleRenderable # needed here to prevent circular import\n\n # always skip rich generated jupyter renderables or None values\n if _safe_isinstance(value, JupyterRenderable) or value is None:\n return\n\n console = console or get_console()\n if console.is_jupyter:\n # Delegate rendering to IPython if the object (and IPython) supports it\n # https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display\n ipython_repr_methods = [\n \"_repr_html_\",\n \"_repr_markdown_\",\n \"_repr_json_\",\n \"_repr_latex_\",\n \"_repr_jpeg_\",\n \"_repr_png_\",\n \"_repr_svg_\",\n \"_repr_mimebundle_\",\n ]\n for repr_method in ipython_repr_methods:\n method = getattr(value, repr_method, None)\n if inspect.ismethod(method):\n # Calling the method ourselves isn't ideal. The interface for the `_repr_*_` methods\n # specifies that if they return None, then they should not be rendered\n # by the notebook.\n try:\n repr_result = method()\n except Exception:\n continue # If the method raises, treat it as if it doesn't exist, try any others\n if repr_result is not None:\n return # Delegate rendering to IPython\n\n # certain renderables should start on a new line\n if _safe_isinstance(value, ConsoleRenderable):\n console.line()\n\n console.print(\n value\n if _safe_isinstance(value, RichRenderable)\n else Pretty(\n value,\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n margin=12,\n ),\n crop=crop,\n new_line_start=True,\n )\n\n\ndef _safe_isinstance(\n obj: object, class_or_tuple: Union[type, Tuple[type, ...]]\n) -> bool:\n \"\"\"isinstance can fail in rare cases, for example types with no __class__\"\"\"\n try:\n return isinstance(obj, class_or_tuple)\n except Exception:\n return False\n\n\ndef install(\n console: Optional[\"Console\"] = None,\n overflow: \"OverflowMethod\" = \"ignore\",\n crop: bool = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n \"\"\"Install automatic pretty printing in the Python REPL.\n\n Args:\n console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.\n overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to \"ignore\".\n crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.\n indent_guides (bool, optional): Enable indentation guides. Defaults to False.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.\n expand_all (bool, optional): Expand all containers. Defaults to False.\n max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. 
Defaults to 100.\n \"\"\"\n from rich import get_console\n\n console = console or get_console()\n assert console is not None\n\n def display_hook(value: Any) -> None:\n \"\"\"Replacement sys.displayhook which prettifies objects with Rich.\"\"\"\n if value is not None:\n assert console is not None\n builtins._ = None # type: ignore[attr-defined]\n console.print(\n value\n if _safe_isinstance(value, RichRenderable)\n else Pretty(\n value,\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n ),\n crop=crop,\n )\n builtins._ = value # type: ignore[attr-defined]\n\n try: # pragma: no cover\n ip = get_ipython() # type: ignore[name-defined]\n from IPython.core.formatters import BaseFormatter\n\n class RichFormatter(BaseFormatter): # type: ignore[misc]\n pprint: bool = True\n\n def __call__(self, value: Any) -> Any:\n if self.pprint:\n return _ipy_display_hook(\n value,\n console=get_console(),\n overflow=overflow,\n indent_guides=indent_guides,\n max_length=max_length,\n max_string=max_string,\n expand_all=expand_all,\n )\n else:\n return repr(value)\n\n # replace plain text formatter with rich formatter\n rich_formatter = RichFormatter()\n ip.display_formatter.formatters[\"text/plain\"] = rich_formatter\n except Exception:\n sys.displayhook = display_hook\n\n\nclass Pretty(JupyterMixin):\n \"\"\"A rich renderable that pretty prints an object.\n\n Args:\n _object (Any): An object to pretty print.\n highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.\n indent_size (int, optional): Number of spaces in indent. Defaults to 4.\n justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.\n overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.\n no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.\n indent_guides (bool, optional): Enable indentation guides. Defaults to False.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.\n max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.\n expand_all (bool, optional): Expand all containers. Defaults to False.\n margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.\n insert_line (bool, optional): Insert a new line if the output has multiple new lines. 
Defaults to False.\n \"\"\"\n\n def __init__(\n self,\n _object: Any,\n highlighter: Optional[\"HighlighterType\"] = None,\n *,\n indent_size: int = 4,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = False,\n indent_guides: bool = False,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n margin: int = 0,\n insert_line: bool = False,\n ) -> None:\n self._object = _object\n self.highlighter = highlighter or ReprHighlighter()\n self.indent_size = indent_size\n self.justify: Optional[\"JustifyMethod\"] = justify\n self.overflow: Optional[\"OverflowMethod\"] = overflow\n self.no_wrap = no_wrap\n self.indent_guides = indent_guides\n self.max_length = max_length\n self.max_string = max_string\n self.max_depth = max_depth\n self.expand_all = expand_all\n self.margin = margin\n self.insert_line = insert_line\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"RenderResult\":\n pretty_str = pretty_repr(\n self._object,\n max_width=options.max_width - self.margin,\n indent_size=self.indent_size,\n max_length=self.max_length,\n max_string=self.max_string,\n max_depth=self.max_depth,\n expand_all=self.expand_all,\n )\n pretty_text = Text(\n pretty_str,\n justify=self.justify or options.justify,\n overflow=self.overflow or options.overflow,\n no_wrap=pick_bool(self.no_wrap, options.no_wrap),\n style=\"pretty\",\n )\n pretty_text = (\n self.highlighter(pretty_text)\n if pretty_text\n else Text(\n f\"{type(self._object)}.__repr__ returned empty string\",\n style=\"dim italic\",\n )\n )\n if self.indent_guides and not options.ascii_only:\n pretty_text = pretty_text.with_indent_guides(\n self.indent_size, style=\"repr.indent\"\n )\n if self.insert_line and \"\\n\" in pretty_text:\n yield \"\"\n yield pretty_text\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"Measurement\":\n pretty_str = pretty_repr(\n self._object,\n max_width=options.max_width,\n indent_size=self.indent_size,\n max_length=self.max_length,\n max_string=self.max_string,\n expand_all=self.expand_all,\n )\n text_width = (\n max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0\n )\n return Measurement(text_width, text_width)\n\n\ndef _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:\n return (\n f\"defaultdict({_object.default_factory!r}, {{\",\n \"})\",\n f\"defaultdict({_object.default_factory!r}, {{}})\",\n )\n\n\ndef _get_braces_for_array(_object: \"array[Any]\") -> Tuple[str, str, str]:\n return (f\"array({_object.typecode!r}, [\", \"])\", f\"array({_object.typecode!r})\")\n\n\n_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {\n os._Environ: lambda _object: (\"environ({\", \"})\", \"environ({})\"),\n array: _get_braces_for_array,\n defaultdict: _get_braces_for_defaultdict,\n Counter: lambda _object: (\"Counter({\", \"})\", \"Counter()\"),\n deque: lambda _object: (\"deque([\", \"])\", \"deque()\"),\n dict: lambda _object: (\"{\", \"}\", \"{}\"),\n UserDict: lambda _object: (\"{\", \"}\", \"{}\"),\n frozenset: lambda _object: (\"frozenset({\", \"})\", \"frozenset()\"),\n list: lambda _object: (\"[\", \"]\", \"[]\"),\n UserList: lambda _object: (\"[\", \"]\", \"[]\"),\n set: lambda _object: (\"{\", \"}\", \"set()\"),\n tuple: lambda _object: (\"(\", \")\", \"()\"),\n MappingProxyType: lambda _object: (\"mappingproxy({\", 
\"})\", \"mappingproxy({})\"),\n}\n_CONTAINERS = tuple(_BRACES.keys())\n_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)\n\n\ndef is_expandable(obj: Any) -> bool:\n \"\"\"Check if an object may be expanded by pretty print.\"\"\"\n return (\n _safe_isinstance(obj, _CONTAINERS)\n or (is_dataclass(obj))\n or (hasattr(obj, \"__rich_repr__\"))\n or _is_attr_object(obj)\n ) and not isclass(obj)\n\n\n@dataclass\nclass Node:\n \"\"\"A node in a repr tree. May be atomic or a container.\"\"\"\n\n key_repr: str = \"\"\n value_repr: str = \"\"\n open_brace: str = \"\"\n close_brace: str = \"\"\n empty: str = \"\"\n last: bool = False\n is_tuple: bool = False\n is_namedtuple: bool = False\n children: Optional[List[\"Node\"]] = None\n key_separator = \": \"\n separator: str = \", \"\n\n def iter_tokens(self) -> Iterable[str]:\n \"\"\"Generate tokens for this node.\"\"\"\n if self.key_repr:\n yield self.key_repr\n yield self.key_separator\n if self.value_repr:\n yield self.value_repr\n elif self.children is not None:\n if self.children:\n yield self.open_brace\n if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:\n yield from self.children[0].iter_tokens()\n yield \",\"\n else:\n for child in self.children:\n yield from child.iter_tokens()\n if not child.last:\n yield self.separator\n yield self.close_brace\n else:\n yield self.empty\n\n def check_length(self, start_length: int, max_length: int) -> bool:\n \"\"\"Check the length fits within a limit.\n\n Args:\n start_length (int): Starting length of the line (indent, prefix, suffix).\n max_length (int): Maximum length.\n\n Returns:\n bool: True if the node can be rendered within max length, otherwise False.\n \"\"\"\n total_length = start_length\n for token in self.iter_tokens():\n total_length += cell_len(token)\n if total_length > max_length:\n return False\n return True\n\n def __str__(self) -> str:\n repr_text = \"\".join(self.iter_tokens())\n return repr_text\n\n def render(\n self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False\n ) -> str:\n \"\"\"Render the node to a pretty repr.\n\n Args:\n max_width (int, optional): Maximum width of the repr. Defaults to 80.\n indent_size (int, optional): Size of indents. Defaults to 4.\n expand_all (bool, optional): Expand all levels. 
Defaults to False.\n\n Returns:\n str: A repr string of the original object.\n \"\"\"\n lines = [_Line(node=self, is_root=True)]\n line_no = 0\n while line_no < len(lines):\n line = lines[line_no]\n if line.expandable and not line.expanded:\n if expand_all or not line.check_length(max_width):\n lines[line_no : line_no + 1] = line.expand(indent_size)\n line_no += 1\n\n repr_str = \"\\n\".join(str(line) for line in lines)\n return repr_str\n\n\n@dataclass\nclass _Line:\n \"\"\"A line in repr output.\"\"\"\n\n parent: Optional[\"_Line\"] = None\n is_root: bool = False\n node: Optional[Node] = None\n text: str = \"\"\n suffix: str = \"\"\n whitespace: str = \"\"\n expanded: bool = False\n last: bool = False\n\n @property\n def expandable(self) -> bool:\n \"\"\"Check if the line may be expanded.\"\"\"\n return bool(self.node is not None and self.node.children)\n\n def check_length(self, max_length: int) -> bool:\n \"\"\"Check this line fits within a given number of cells.\"\"\"\n start_length = (\n len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)\n )\n assert self.node is not None\n return self.node.check_length(start_length, max_length)\n\n def expand(self, indent_size: int) -> Iterable[\"_Line\"]:\n \"\"\"Expand this line by adding children on their own line.\"\"\"\n node = self.node\n assert node is not None\n whitespace = self.whitespace\n assert node.children\n if node.key_repr:\n new_line = yield _Line(\n text=f\"{node.key_repr}{node.key_separator}{node.open_brace}\",\n whitespace=whitespace,\n )\n else:\n new_line = yield _Line(text=node.open_brace, whitespace=whitespace)\n child_whitespace = self.whitespace + \" \" * indent_size\n tuple_of_one = node.is_tuple and len(node.children) == 1\n for last, child in loop_last(node.children):\n separator = \",\" if tuple_of_one else node.separator\n line = _Line(\n parent=new_line,\n node=child,\n whitespace=child_whitespace,\n suffix=separator,\n last=last and not tuple_of_one,\n )\n yield line\n\n yield _Line(\n text=node.close_brace,\n whitespace=whitespace,\n suffix=self.suffix,\n last=self.last,\n )\n\n def __str__(self) -> str:\n if self.last:\n return f\"{self.whitespace}{self.text}{self.node or ''}\"\n else:\n return (\n f\"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}\"\n )\n\n\ndef _is_namedtuple(obj: Any) -> bool:\n \"\"\"Checks if an object is most likely a namedtuple. It is possible\n to craft an object that passes this check and isn't a namedtuple, but\n there is only a minuscule chance of this happening unintentionally.\n\n Args:\n obj (Any): The object to test\n\n Returns:\n bool: True if the object is a namedtuple. 
False otherwise.\n \"\"\"\n try:\n fields = getattr(obj, \"_fields\", None)\n except Exception:\n # Being very defensive - if we cannot get the attr then its not a namedtuple\n return False\n return isinstance(obj, tuple) and isinstance(fields, tuple)\n\n\ndef traverse(\n _object: Any,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n) -> Node:\n \"\"\"Traverse object and generate a tree.\n\n Args:\n _object (Any): Object to be traversed.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.\n Defaults to None.\n max_depth (int, optional): Maximum depth of data structures, or None for no maximum.\n Defaults to None.\n\n Returns:\n Node: The root of a tree structure which can be used to render a pretty repr.\n \"\"\"\n\n def to_repr(obj: Any) -> str:\n \"\"\"Get repr string for an object, but catch errors.\"\"\"\n if (\n max_string is not None\n and _safe_isinstance(obj, (bytes, str))\n and len(obj) > max_string\n ):\n truncated = len(obj) - max_string\n obj_repr = f\"{obj[:max_string]!r}+{truncated}\"\n else:\n try:\n obj_repr = repr(obj)\n except Exception as error:\n obj_repr = f\"<repr-error {str(error)!r}>\"\n return obj_repr\n\n visited_ids: Set[int] = set()\n push_visited = visited_ids.add\n pop_visited = visited_ids.remove\n\n def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:\n \"\"\"Walk the object depth first.\"\"\"\n\n obj_type = type(obj)\n py_version = (sys.version_info.major, sys.version_info.minor)\n children: List[Node]\n reached_max_depth = max_depth is not None and depth >= max_depth\n\n def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:\n for arg in rich_args:\n if _safe_isinstance(arg, tuple):\n if len(arg) == 3:\n key, child, default = arg\n if default == child:\n continue\n yield key, child\n elif len(arg) == 2:\n key, child = arg\n yield key, child\n elif len(arg) == 1:\n yield arg[0]\n else:\n yield arg\n\n try:\n fake_attributes = hasattr(\n obj, \"awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492\"\n )\n except Exception:\n fake_attributes = False\n\n rich_repr_result: Optional[RichReprResult] = None\n if not fake_attributes:\n try:\n if hasattr(obj, \"__rich_repr__\") and not isclass(obj):\n rich_repr_result = obj.__rich_repr__()\n except Exception:\n pass\n\n if rich_repr_result is not None:\n angular = getattr(obj.__rich_repr__, \"angular\", False)\n args = list(iter_rich_args(rich_repr_result))\n class_name = obj.__class__.__name__\n\n if args:\n children = []\n append = children.append\n\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n if angular:\n node = Node(\n open_brace=f\"<{class_name} \",\n close_brace=\">\",\n children=children,\n last=root,\n separator=\" \",\n )\n else:\n node = Node(\n open_brace=f\"{class_name}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n for last, arg in loop_last(args):\n if _safe_isinstance(arg, tuple):\n key, child = arg\n child_node = _traverse(child, depth=depth + 1)\n child_node.last = last\n child_node.key_repr = key\n child_node.key_separator = \"=\"\n append(child_node)\n else:\n child_node = _traverse(arg, depth=depth + 1)\n child_node.last = last\n append(child_node)\n else:\n node = Node(\n value_repr=f\"<{class_name}>\" if angular else f\"{class_name}()\",\n children=[],\n last=root,\n )\n elif 
_is_attr_object(obj) and not fake_attributes:\n children = []\n append = children.append\n\n attr_fields = _get_attr_fields(obj)\n if attr_fields:\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n node = Node(\n open_brace=f\"{obj.__class__.__name__}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n\n def iter_attrs() -> Iterable[\n Tuple[str, Any, Optional[Callable[[Any], str]]]\n ]:\n \"\"\"Iterate over attr fields and values.\"\"\"\n for attr in attr_fields:\n if attr.repr:\n try:\n value = getattr(obj, attr.name)\n except Exception as error:\n # Can happen, albeit rarely\n yield (attr.name, error, None)\n else:\n yield (\n attr.name,\n value,\n attr.repr if callable(attr.repr) else None,\n )\n\n for last, (name, value, repr_callable) in loop_last(iter_attrs()):\n if repr_callable:\n child_node = Node(value_repr=str(repr_callable(value)))\n else:\n child_node = _traverse(value, depth=depth + 1)\n child_node.last = last\n child_node.key_repr = name\n child_node.key_separator = \"=\"\n append(child_node)\n else:\n node = Node(\n value_repr=f\"{obj.__class__.__name__}()\", children=[], last=root\n )\n\n elif (\n is_dataclass(obj)\n and not _safe_isinstance(obj, type)\n and not fake_attributes\n and (_is_dataclass_repr(obj) or py_version == (3, 6))\n ):\n obj_id = id(obj)\n if obj_id in visited_ids:\n # Recursion detected\n return Node(value_repr=\"...\")\n push_visited(obj_id)\n\n children = []\n append = children.append\n if reached_max_depth:\n node = Node(value_repr=f\"...\")\n else:\n node = Node(\n open_brace=f\"{obj.__class__.__name__}(\",\n close_brace=\")\",\n children=children,\n last=root,\n )\n\n for last, field in loop_last(\n field for field in fields(obj) if field.repr\n ):\n child_node = _traverse(getattr(obj, field.name), depth=depth + 1)\n child_node.key_repr = field.name\n child_node.last = last\n child_node.key_separator = \"=\"\n append(child_node)\n\n pop_visited(obj_id)\n elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):\n if reached_max_depth:\n node = Node(value_repr=\"...\")\n else:\n children = []\n class_name = obj.__class__.__name__\n node = Node(\n open_brace=f\"{class_name}(\",\n close_brace=\")\",\n children=children,\n empty=f\"{class_name}()\",\n )\n append = children.append\n for last, (key, value) in loop_last(obj._asdict().items()):\n child_node = _traverse(value, depth=depth + 1)\n child_node.key_repr = key\n child_node.last = last\n child_node.key_separator = \"=\"\n append(child_node)\n elif _safe_isinstance(obj, _CONTAINERS):\n for container_type in _CONTAINERS:\n if _safe_isinstance(obj, container_type):\n obj_type = container_type\n break\n\n obj_id = id(obj)\n if obj_id in visited_ids:\n # Recursion detected\n return Node(value_repr=\"...\")\n push_visited(obj_id)\n\n open_brace, close_brace, empty = _BRACES[obj_type](obj)\n\n if reached_max_depth:\n node = Node(value_repr=f\"...\", last=root)\n elif obj_type.__repr__ != type(obj).__repr__:\n node = Node(value_repr=to_repr(obj), last=root)\n elif obj:\n children = []\n node = Node(\n open_brace=open_brace,\n close_brace=close_brace,\n children=children,\n last=root,\n )\n append = children.append\n num_items = len(obj)\n last_item_index = num_items - 1\n\n if _safe_isinstance(obj, _MAPPING_CONTAINERS):\n iter_items = iter(obj.items())\n if max_length is not None:\n iter_items = islice(iter_items, max_length)\n for index, (key, child) in enumerate(iter_items):\n child_node = _traverse(child, depth=depth + 1)\n child_node.key_repr = to_repr(key)\n 
child_node.last = index == last_item_index\n append(child_node)\n else:\n iter_values = iter(obj)\n if max_length is not None:\n iter_values = islice(iter_values, max_length)\n for index, child in enumerate(iter_values):\n child_node = _traverse(child, depth=depth + 1)\n child_node.last = index == last_item_index\n append(child_node)\n if max_length is not None and num_items > max_length:\n append(Node(value_repr=f\"... +{num_items - max_length}\", last=True))\n else:\n node = Node(empty=empty, children=[], last=root)\n\n pop_visited(obj_id)\n else:\n node = Node(value_repr=to_repr(obj), last=root)\n node.is_tuple = _safe_isinstance(obj, tuple)\n node.is_namedtuple = _is_namedtuple(obj)\n return node\n\n node = _traverse(_object, root=True)\n return node\n\n\ndef pretty_repr(\n _object: Any,\n *,\n max_width: int = 80,\n indent_size: int = 4,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n) -> str:\n \"\"\"Prettify repr string by expanding on to new lines to fit within a given width.\n\n Args:\n _object (Any): Object to repr.\n max_width (int, optional): Desired maximum width of repr string. Defaults to 80.\n indent_size (int, optional): Number of spaces to indent. Defaults to 4.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.\n Defaults to None.\n max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.\n Defaults to None.\n expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.\n\n Returns:\n str: A possibly multi-line representation of the object.\n \"\"\"\n\n if _safe_isinstance(_object, Node):\n node = _object\n else:\n node = traverse(\n _object, max_length=max_length, max_string=max_string, max_depth=max_depth\n )\n repr_str: str = node.render(\n max_width=max_width, indent_size=indent_size, expand_all=expand_all\n )\n return repr_str\n\n\ndef pprint(\n _object: Any,\n *,\n console: Optional[\"Console\"] = None,\n indent_guides: bool = True,\n max_length: Optional[int] = None,\n max_string: Optional[int] = None,\n max_depth: Optional[int] = None,\n expand_all: bool = False,\n) -> None:\n \"\"\"A convenience function for pretty printing.\n\n Args:\n _object (Any): Object to pretty print.\n console (Console, optional): Console instance, or None to use default. Defaults to None.\n max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.\n Defaults to None.\n max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.\n max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.\n indent_guides (bool, optional): Enable indentation guides. Defaults to True.\n expand_all (bool, optional): Expand all containers. 
Defaults to False.\n \"\"\"\n _console = get_console() if console is None else console\n _console.print(\n Pretty(\n _object,\n max_length=max_length,\n max_string=max_string,\n max_depth=max_depth,\n indent_guides=indent_guides,\n expand_all=expand_all,\n overflow=\"ignore\",\n ),\n soft_wrap=True,\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n class BrokenRepr:\n def __repr__(self) -> str:\n 1 / 0\n return \"this will fail\"\n\n from typing import NamedTuple\n\n class StockKeepingUnit(NamedTuple):\n name: str\n description: str\n price: float\n category: str\n reviews: List[str]\n\n d = defaultdict(int)\n d[\"foo\"] = 5\n data = {\n \"foo\": [\n 1,\n \"Hello World!\",\n 100.123,\n 323.232,\n 432324.0,\n {5, 6, 7, (1, 2, 3, 4), 8},\n ],\n \"bar\": frozenset({1, 2, 3}),\n \"defaultdict\": defaultdict(\n list, {\"crumble\": [\"apple\", \"rhubarb\", \"butter\", \"sugar\", \"flour\"]}\n ),\n \"counter\": Counter(\n [\n \"apple\",\n \"orange\",\n \"pear\",\n \"kumquat\",\n \"kumquat\",\n \"durian\" * 100,\n ]\n ),\n \"atomic\": (False, True, None),\n \"namedtuple\": StockKeepingUnit(\n \"Sparkling British Spring Water\",\n \"Carbonated spring water\",\n 0.9,\n \"water\",\n [\"its amazing!\", \"its terrible!\"],\n ),\n \"Broken\": BrokenRepr(),\n }\n data[\"foo\"].append(data) # type: ignore[attr-defined]\n\n from rich import print\n\n print(Pretty(data, indent_guides=True, max_string=20))\n",
"path": "rich/pretty.py"
}
] | diff --git a/rich/pretty.py b/rich/pretty.py
index d3bac94e8..1c6b16716 100644
--- a/rich/pretty.py
+++ b/rich/pretty.py
@@ -366,7 +366,7 @@ def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, st
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
- return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
+ return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
|
qtile__qtile-2082 | qtile top breaks when the terminal is too small
I just launched `qtile top` several times but it only worked once, and broke on the next runs:
```
$ qtile top --force-start
$ qtile top
Traceback (most recent call last):
File "libqtile/scripts/main.py", line 46, in main
options.func(options)
File "qtile/libqtile/scripts/top.py", line 158, in top
force_start=force_start)
File "/usr/lib/python3.7/curses/__init__.py", line 102, in wrapper
return func(stdscr, *args, **kwds)
File "libqtile/scripts/top.py", line 95, in get_stats
scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))
_curses.error: addwstr() returned ERR
```
Also I'm not sure what's happening, but I can't replicate this in another X session, whether I use PYTHONTRACEMALLOC or --force-start.
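For illustration, a minimal, self-contained sketch (not qtile's actual code; `draw` is a made-up stand-in for `get_stats`) of the failure mode and one way to guard against it. `addstr()` raises `curses.error` as soon as it is asked to write outside the window, so a too-small terminal can be detected by catching that exception around `curses.wrapper()` and dropping back to plain output, which is the approach the patch further down in this entry takes:
```python
import curses

def draw(scr) -> None:
    # Mimic a top-like loop that prints one entry per row. On a small
    # terminal, addstr() raises curses.error ("addwstr() returned ERR")
    # as soon as a row falls outside the window.
    for row in range(200):  # more rows than most terminals have
        scr.addstr(row, 0, f"#{row:<3} some/file.py:42  {row:.1f} KiB")
    scr.refresh()
    scr.getkey()

try:
    curses.wrapper(draw)
except curses.error:
    # Fall back to non-curses output instead of crashing.
    print("Terminal too small for curses interface.")
```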
| [
{
"content": "# Copyright (c) 2015, Roger Duran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n Command-line top like for qtile\n\"\"\"\n\nimport curses\nimport linecache\nimport os\nimport time\n\nfrom libqtile import command_client, command_interface, ipc\n\n\"\"\" These imports are here because they are not supported in pypy\nhaving them at the top of the file causes problems when running any\nof the other scripts.\n\"\"\"\ntry:\n import tracemalloc\n from tracemalloc import Snapshot\n ENABLED = True\nexcept ModuleNotFoundError:\n ENABLED = False\n\n\nclass TraceNotStarted(Exception):\n pass\n\n\nclass TraceCantStart(Exception):\n pass\n\n\ndef get_trace(client, force_start):\n (started, path) = client.tracemalloc_dump()\n if force_start and not started:\n client.tracemalloc_toggle()\n (started, path) = client.tracemalloc_dump()\n if not started:\n raise TraceCantStart\n elif not started:\n raise TraceNotStarted\n\n return Snapshot.load(path)\n\n\ndef filter_snapshot(snapshot):\n return snapshot.filter_traces((\n tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n tracemalloc.Filter(False, \"<unknown>\"),\n ))\n\n\ndef get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,\n force_start=False):\n (max_y, max_x) = scr.getmaxyx()\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n while True:\n scr.addstr(0, 0, \"Qtile - Top {} lines\".format(limit))\n scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),\n curses.A_BOLD | curses.A_REVERSE)\n\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n cnt = 1\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n code = \"\"\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n code = line\n mem = \"{:.1f} KiB\".format(stat.size / 1024.0)\n filename = \"{}:{}\".format(filename, frame.lineno)\n scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))\n scr.addstr(cnt + 2, 4, code, curses.color_pair(1))\n cnt += 2\n\n other = top_stats[limit:]\n cnt += 2\n if other:\n size = sum(stat.size for stat in other)\n other_size = (\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n scr.addstr(cnt, 0, other_size, curses.A_BOLD)\n cnt += 1\n\n total = sum(stat.size for stat 
in top_stats)\n total_size = \"Total allocated size: {0:.1f} KiB\".format(total / 1024.0)\n scr.addstr(cnt, 0, total_size, curses.A_BOLD)\n\n scr.move(max_y - 2, max_y - 2)\n scr.refresh()\n time.sleep(seconds)\n scr.erase()\n\n\ndef raw_stats(client, group_by='lineno', limit=10, force_start=False):\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n\n print(\"Qtile - Top {} lines\".format(limit))\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n print(\"#{}: {}:{}: {:.1f} KiB\"\n .format(index, filename, frame.lineno, stat.size / 1024.0))\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n print(' {}'.format(line))\n\n other = top_stats[limit:]\n if other:\n size = sum(stat.size for stat in other)\n print(\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n total = sum(stat.size for stat in top_stats)\n print(\"Total allocated size: {0:.1f} KiB\".format(total / 1024.0))\n\n\ndef top(opts):\n if not ENABLED:\n raise Exception('Could not import tracemalloc')\n lines = opts.lines\n seconds = opts.seconds\n force_start = opts.force_start\n if opts.socket is None:\n socket = ipc.find_sockfile()\n else:\n socket = opts.socket\n client = ipc.Client(socket)\n client = command_interface.IPCCommandInterface(client)\n client = command_client.InteractiveCommandClient(client)\n\n try:\n if not opts.raw:\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\n force_start=force_start)\n else:\n raw_stats(client, limit=lines, force_start=force_start)\n except TraceNotStarted:\n print(\"tracemalloc not started on qtile, start by setting \"\n \"PYTHONTRACEMALLOC=1 before starting qtile\")\n print(\"or force start tracemalloc now, but you'll lose early traces\")\n exit(1)\n except TraceCantStart:\n print(\"Can't start tracemalloc on qtile, check the logs\")\n except KeyboardInterrupt:\n exit(-1)\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"top\", help=\"resource usage information\")\n parser.add_argument('-l', '--lines', type=int, dest=\"lines\", default=10,\n help='Number of lines.')\n parser.add_argument('-r', '--raw', dest=\"raw\", action=\"store_true\",\n default=False, help='Output raw without curses')\n parser.add_argument('-t', '--time', type=float, dest=\"seconds\",\n default=1.5, help='Number of seconds to refresh')\n parser.add_argument('--force-start', dest=\"force_start\",\n action=\"store_true\", default=False,\n help='Force start tracemalloc on qtile')\n parser.add_argument('-s', '--socket', type=str, dest=\"socket\",\n help='Use specified communication socket.')\n parser.set_defaults(func=top)\n",
"path": "libqtile/scripts/top.py"
}
] | [
{
"content": "# Copyright (c) 2015, Roger Duran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n Command-line top like for qtile\n\"\"\"\n\nimport curses\nimport linecache\nimport os\nimport time\n\nfrom libqtile import command_client, command_interface, ipc\n\n\"\"\" These imports are here because they are not supported in pypy\nhaving them at the top of the file causes problems when running any\nof the other scripts.\n\"\"\"\ntry:\n import tracemalloc\n from tracemalloc import Snapshot\n ENABLED = True\nexcept ModuleNotFoundError:\n ENABLED = False\n\n\nclass TraceNotStarted(Exception):\n pass\n\n\nclass TraceCantStart(Exception):\n pass\n\n\ndef get_trace(client, force_start):\n (started, path) = client.tracemalloc_dump()\n if force_start and not started:\n client.tracemalloc_toggle()\n (started, path) = client.tracemalloc_dump()\n if not started:\n raise TraceCantStart\n elif not started:\n raise TraceNotStarted\n\n return Snapshot.load(path)\n\n\ndef filter_snapshot(snapshot):\n return snapshot.filter_traces((\n tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n tracemalloc.Filter(False, \"<unknown>\"),\n ))\n\n\ndef get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,\n force_start=False):\n (max_y, max_x) = scr.getmaxyx()\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n while True:\n scr.addstr(0, 0, \"Qtile - Top {} lines\".format(limit))\n scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),\n curses.A_BOLD | curses.A_REVERSE)\n\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n cnt = 1\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n code = \"\"\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n code = line\n mem = \"{:.1f} KiB\".format(stat.size / 1024.0)\n filename = \"{}:{}\".format(filename, frame.lineno)\n scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))\n scr.addstr(cnt + 2, 4, code, curses.color_pair(1))\n cnt += 2\n\n other = top_stats[limit:]\n cnt += 2\n if other:\n size = sum(stat.size for stat in other)\n other_size = (\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n scr.addstr(cnt, 0, other_size, curses.A_BOLD)\n cnt += 1\n\n total = sum(stat.size for stat 
in top_stats)\n total_size = \"Total allocated size: {0:.1f} KiB\".format(total / 1024.0)\n scr.addstr(cnt, 0, total_size, curses.A_BOLD)\n\n scr.move(max_y - 2, max_y - 2)\n scr.refresh()\n time.sleep(seconds)\n scr.erase()\n\n\ndef raw_stats(client, group_by='lineno', limit=10, force_start=False):\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n\n print(\"Qtile - Top {} lines\".format(limit))\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n print(\"#{}: {}:{}: {:.1f} KiB\"\n .format(index, filename, frame.lineno, stat.size / 1024.0))\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n print(' {}'.format(line))\n\n other = top_stats[limit:]\n if other:\n size = sum(stat.size for stat in other)\n print(\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n total = sum(stat.size for stat in top_stats)\n print(\"Total allocated size: {0:.1f} KiB\".format(total / 1024.0))\n\n\ndef top(opts):\n if not ENABLED:\n raise Exception('Could not import tracemalloc')\n lines = opts.lines\n seconds = opts.seconds\n force_start = opts.force_start\n if opts.socket is None:\n socket = ipc.find_sockfile()\n else:\n socket = opts.socket\n client = ipc.Client(socket)\n client = command_interface.IPCCommandInterface(client)\n client = command_client.InteractiveCommandClient(client)\n\n try:\n if not opts.raw:\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\n force_start=force_start)\n else:\n raw_stats(client, limit=lines, force_start=force_start)\n except TraceNotStarted:\n print(\"tracemalloc not started on qtile, start by setting \"\n \"PYTHONTRACEMALLOC=1 before starting qtile\")\n print(\"or force start tracemalloc now, but you'll lose early traces\")\n exit(1)\n except TraceCantStart:\n print(\"Can't start tracemalloc on qtile, check the logs\")\n except KeyboardInterrupt:\n exit(-1)\n except curses.error:\n print(\"Terminal too small for curses interface.\")\n raw_stats(client, limit=lines, force_start=force_start)\n\n\ndef add_subcommand(subparsers):\n parser = subparsers.add_parser(\"top\", help=\"resource usage information\")\n parser.add_argument('-l', '--lines', type=int, dest=\"lines\", default=10,\n help='Number of lines.')\n parser.add_argument('-r', '--raw', dest=\"raw\", action=\"store_true\",\n default=False, help='Output raw without curses')\n parser.add_argument('-t', '--time', type=float, dest=\"seconds\",\n default=1.5, help='Number of seconds to refresh')\n parser.add_argument('--force-start', dest=\"force_start\",\n action=\"store_true\", default=False,\n help='Force start tracemalloc on qtile')\n parser.add_argument('-s', '--socket', type=str, dest=\"socket\",\n help='Use specified communication socket.')\n parser.set_defaults(func=top)\n",
"path": "libqtile/scripts/top.py"
}
] | diff --git a/libqtile/scripts/top.py b/libqtile/scripts/top.py
index 0777d21142..eb8da5b6a4 100644
--- a/libqtile/scripts/top.py
+++ b/libqtile/scripts/top.py
@@ -167,6 +167,9 @@ def top(opts):
print("Can't start tracemalloc on qtile, check the logs")
except KeyboardInterrupt:
exit(-1)
+ except curses.error:
+ print("Terminal too small for curses interface.")
+ raw_stats(client, limit=lines, force_start=force_start)
def add_subcommand(subparsers):
|
e2nIEE__pandapower-1738 | plotting.geo convert_gis_to_geodata leads to issue if run after convert_geodata_to_gis
```python
import pandapower.plotting.geo as geo
import pandapower.networks as pn
net = pn.mv_oberrhein()
geo.convert_geodata_to_gis(net)
geo.convert_gis_to_geodata(net)
```
results in `AttributeError: 'Series' object has no attribute 'geometry'`
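For illustration, a minimal sketch of why this fails and what the patch below changes. The GeoDataFrame here is a hypothetical stand-in for `net.line_geodata` after `convert_geodata_to_gis()`: selecting the `coords` column returns a plain pandas Series, which has no `.geometry` accessor, while the GeoDataFrame's own `.geometry` attribute is the GeoSeries the conversion actually needs:
```python
import geopandas as gpd
from shapely.geometry import LineString

# Hypothetical stand-in for net.line_geodata after convert_geodata_to_gis()
branch_geo = gpd.GeoDataFrame(
    {"coords": [None, None]},
    geometry=[LineString([(0, 0), (1, 1)]), LineString([(1, 1), (2, 0)])],
    crs="epsg:31467",
)

# branch_geo["coords"].geometry -> AttributeError: 'Series' object has no attribute 'geometry'
# branch_geo.geometry           -> GeoSeries of LineStrings, so this works:
branch_geo["coords"] = branch_geo.geometry.apply(lambda geom: list(geom.coords))
print(branch_geo["coords"].tolist())
# [[(0.0, 0.0), (1.0, 1.0)], [(1.0, 1.0), (2.0, 0.0)]]
```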
| [
{
"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport sys\nfrom numpy import array, setdiff1d\n\nfrom pandapower.auxiliary import soft_dependency_error\n\ntry:\n from shapely.geometry import Point, LineString\n shapely_INSTALLED = True\nexcept ImportError:\n shapely_INSTALLED = False\n\ntry:\n from geopandas import GeoDataFrame, GeoSeries\n geopandas_INSTALLED = True\nexcept ImportError:\n geopandas_INSTALLED = False\n\ntry:\n from pyproj import Proj, transform\n pyproj_INSTALLED = True\nexcept ImportError:\n pyproj_INSTALLED = False\n\n\ndef _node_geometries_from_geodata(node_geo, epsg=31467):\n \"\"\"\n Creates a geopandas geodataframe from a given dataframe of with node coordinates as x and y\n values.\n\n :param node_geo: The dataframe containing the node coordinates (x and y values)\n :type node_geo: pandas.dataframe\n :param epsg: The epsg projection of the node coordinates\n :type epsg: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: node_geodata - a geodataframe containing the node_geo and Points in the geometry column\n \"\"\"\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = [Point(x, y) for x, y in node_geo[[\"x\", \"y\"]].values]\n return GeoDataFrame(node_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=node_geo.index)\n\n\ndef _branch_geometries_from_geodata(branch_geo, epsg=31467):\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = GeoSeries([LineString(x) for x in branch_geo.coords.values], index=branch_geo.index,\n crs=f\"epsg:{epsg}\")\n return GeoDataFrame(branch_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=branch_geo.index)\n\n\ndef _transform_node_geometry_to_geodata(node_geo):\n \"\"\"\n Create x and y values from geodataframe\n\n :param node_geo: The dataframe containing the node geometries (as shapely points)\n :type node_geo: geopandas.GeoDataFrame\n :return: bus_geo - The given geodataframe with x and y values\n \"\"\"\n node_geo[\"x\"] = [p.x for p in node_geo.geometry]\n node_geo[\"y\"] = [p.y for p in node_geo.geometry]\n return node_geo\n\n\ndef _transform_branch_geometry_to_coords(branch_geo):\n \"\"\"\n Create coords entries from geodataframe geometries\n\n :param branch_geo: The dataframe containing the branch geometries (as shapely LineStrings)\n :type branch_geo: geopandas.GeoDataFrame\n :return: branch_geo - The given geodataframe with coords\n \"\"\"\n branch_geo[\"coords\"] = branch_geo[\"coords\"].geometry.apply(lambda x: list(x.coords))\n return branch_geo\n\n\ndef _convert_xy_epsg(x, y, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts the given x and y coordinates according to the defined epsg projections.\n\n :param x: x-values of coordinates\n :type x: iterable\n :param y: y-values of coordinates\n :type y: iterable\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: transformed_coords - x and y values in new coordinate system\n \"\"\"\n if not 
pyproj_INSTALLED:\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"pyproj\")\n in_proj = Proj(init='epsg:%i' % epsg_in)\n out_proj = Proj(init='epsg:%i' % epsg_out)\n return transform(in_proj, out_proj, x, y)\n\n\ndef convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):\n \"\"\"\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n _transform_node_geometry_to_geodata(net.bus_geodata)\n if branch_geodata:\n _transform_branch_geometry_to_coords(net.line_geodata)\n\n\ndef convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):\n \"\"\"\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n net[\"bus_geodata\"] = _node_geometries_from_geodata(net[\"bus_geodata\"], epsg)\n if branch_geodata:\n net[\"line_geodata\"] = _branch_geometries_from_geodata(net[\"line_geodata\"], epsg)\n net[\"gis_epsg_code\"] = epsg\n\n\ndef convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: net - the given pandapower network (no copy!)\n \"\"\"\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"] = _convert_xy_epsg(\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"], epsg_in, epsg_out)\n return net\n",
"path": "pandapower/plotting/geo.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport sys\nfrom numpy import array, setdiff1d\n\nfrom pandapower.auxiliary import soft_dependency_error\n\ntry:\n from shapely.geometry import Point, LineString\n shapely_INSTALLED = True\nexcept ImportError:\n shapely_INSTALLED = False\n\ntry:\n from geopandas import GeoDataFrame, GeoSeries\n geopandas_INSTALLED = True\nexcept ImportError:\n geopandas_INSTALLED = False\n\ntry:\n from pyproj import Proj, transform\n pyproj_INSTALLED = True\nexcept ImportError:\n pyproj_INSTALLED = False\n\n\ndef _node_geometries_from_geodata(node_geo, epsg=31467):\n \"\"\"\n Creates a geopandas geodataframe from a given dataframe of with node coordinates as x and y\n values.\n\n :param node_geo: The dataframe containing the node coordinates (x and y values)\n :type node_geo: pandas.dataframe\n :param epsg: The epsg projection of the node coordinates\n :type epsg: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: node_geodata - a geodataframe containing the node_geo and Points in the geometry column\n \"\"\"\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = [Point(x, y) for x, y in node_geo[[\"x\", \"y\"]].values]\n return GeoDataFrame(node_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=node_geo.index)\n\n\ndef _branch_geometries_from_geodata(branch_geo, epsg=31467):\n missing_packages = array([\"shapely\", \"geopandas\"])[~array([\n shapely_INSTALLED, geopandas_INSTALLED])]\n if len(missing_packages):\n soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", missing_packages)\n geoms = GeoSeries([LineString(x) for x in branch_geo.coords.values], index=branch_geo.index,\n crs=f\"epsg:{epsg}\")\n return GeoDataFrame(branch_geo, crs=f\"epsg:{epsg}\", geometry=geoms, index=branch_geo.index)\n\n\ndef _transform_node_geometry_to_geodata(node_geo):\n \"\"\"\n Create x and y values from geodataframe\n\n :param node_geo: The dataframe containing the node geometries (as shapely points)\n :type node_geo: geopandas.GeoDataFrame\n :return: bus_geo - The given geodataframe with x and y values\n \"\"\"\n node_geo[\"x\"] = [p.x for p in node_geo.geometry]\n node_geo[\"y\"] = [p.y for p in node_geo.geometry]\n return node_geo\n\n\ndef _transform_branch_geometry_to_coords(branch_geo):\n \"\"\"\n Create coords entries from geodataframe geometries\n\n :param branch_geo: The dataframe containing the branch geometries (as shapely LineStrings)\n :type branch_geo: geopandas.GeoDataFrame\n :return: branch_geo - The given geodataframe with coords\n \"\"\"\n branch_geo[\"coords\"] = branch_geo.geometry.apply(lambda x: list(x.coords))\n return branch_geo\n\n\ndef _convert_xy_epsg(x, y, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts the given x and y coordinates according to the defined epsg projections.\n\n :param x: x-values of coordinates\n :type x: iterable\n :param y: y-values of coordinates\n :type y: iterable\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: transformed_coords - x and y values in new coordinate system\n \"\"\"\n if not pyproj_INSTALLED:\n 
soft_dependency_error(str(sys._getframe().f_code.co_name)+\"()\", \"pyproj\")\n in_proj = Proj(init='epsg:%i' % epsg_in)\n out_proj = Proj(init='epsg:%i' % epsg_out)\n return transform(in_proj, out_proj, x, y)\n\n\ndef convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):\n \"\"\"\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n _transform_node_geometry_to_geodata(net.bus_geodata)\n if branch_geodata:\n _transform_branch_geometry_to_coords(net.line_geodata)\n\n\ndef convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):\n \"\"\"\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n \"\"\"\n if node_geodata:\n net[\"bus_geodata\"] = _node_geometries_from_geodata(net[\"bus_geodata\"], epsg)\n if branch_geodata:\n net[\"line_geodata\"] = _branch_geometries_from_geodata(net[\"line_geodata\"], epsg)\n net[\"gis_epsg_code\"] = epsg\n\n\ndef convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):\n \"\"\"\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: net - the given pandapower network (no copy!)\n \"\"\"\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"] = _convert_xy_epsg(\n net['bus_geodata'].loc[:, \"x\"], net['bus_geodata'].loc[:, \"y\"], epsg_in, epsg_out)\n return net\n",
"path": "pandapower/plotting/geo.py"
}
] | diff --git a/pandapower/plotting/geo.py b/pandapower/plotting/geo.py
index 729307093..1fbc1a490 100644
--- a/pandapower/plotting/geo.py
+++ b/pandapower/plotting/geo.py
@@ -77,7 +77,7 @@ def _transform_branch_geometry_to_coords(branch_geo):
:type branch_geo: geopandas.GeoDataFrame
:return: branch_geo - The given geodataframe with coords
"""
- branch_geo["coords"] = branch_geo["coords"].geometry.apply(lambda x: list(x.coords))
+ branch_geo["coords"] = branch_geo.geometry.apply(lambda x: list(x.coords))
return branch_geo
|
pwndbg__pwndbg-877 | Exceptions when running from folder with space and number in name
### Description
When debugging an application in a folder with a name that includes a space followed by `0<digit>` (for example, a folder named `x 01`), a Python traceback is triggered on seemingly every command.
What I think is happening is that pwndbg runs the "info auxv" command in the background every time the user enters a command, and a regular expression in `auxv.py` incorrectly parses the "File name of executable" line, which in this case looks something like this:
```
31 AT_EXECFN File name of executable 0x7fffffffefde "/home/user/test/x 01/test"
```
There are probably other file- and folder-name patterns that can result in this behavior, too.
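For illustration, a minimal reproduction of the mis-parse outside of gdb, using the greedy pattern from `use_info_auxv()` (shown in the file dump below) and a non-greedy variant along the lines of the fix (the patched file later in this entry switches to `.*?`). With the greedy `.*`, backtracking lets the value group latch onto the ` 01` inside the path, and `int('01', 0)` then fails because base-0 parsing rejects numbers with leading zeros:
```python
import re

# The AT_EXECFN line from "info auxv", as quoted above
line = ('31   AT_EXECFN            File name of executable       '
        '0x7fffffffefde "/home/user/test/x 01/test"')

greedy = re.match(r'([0-9]+) .* (0x[0-9a-f]+|[0-9]+)', line)
lazy = re.match(r'([0-9]+) .*? (0x[0-9a-f]+|[0-9]+)', line)

print(greedy.group(2))  # '01'             -> int('01', 0) raises ValueError
print(lazy.group(2))    # '0x7fffffffefde' -> int(..., 0) parses fine
```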
### Steps to reproduce
- Make a folder named, for example, "x 01"
- Put any debuggable binary in it (even a basic hello-world works)
- Open it in gdb
- Type "r" to run
- Every gdb command run while the binary is running will now trigger a Python traceback
Here's a full example session that shows everything (sorry that it's kind of long):
```
$ pwd
/home/user/test/x 01
$ cat test.c
#include <stdio.h>
#include <stdlib.h>
void main()
{
printf("Hello world\n");
getchar();
}
$ gcc -o test test.c
$ gdb test
GNU gdb (Ubuntu 9.2-0ubuntu1~20.04) 9.2
Copyright (C) 2020 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word"...
pwndbg: loaded 191 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
Reading symbols from test...
(No debugging symbols found in test)
pwndbg> r
Starting program: /home/user/test/x 01/test
Hello world
^C
Program received signal SIGINT, Interrupt.
0x00007ffff7ebe142 in __GI___libc_read (fd=0, buf=0x5555555596b0, nbytes=1024) at ../sysdeps/unix/sysv/linux/read.c:26
26 ../sysdeps/unix/sysv/linux/read.c: No such file or directory.
Exception occurred: Error: invalid literal for int() with base 0: '01' (<class 'ValueError'>)
For more info invoke `set exception-verbose on` and rerun the command
or debug it by yourself with `set exception-debugger on`
Python Exception <class 'ValueError'> invalid literal for int() with base 0: '01':
Exception occurred: Error: invalid literal for int() with base 0: '01' (<class 'ValueError'>)
For more info invoke `set exception-verbose on` and rerun the command
or debug it by yourself with `set exception-debugger on`
Python Exception <class 'ValueError'> invalid literal for int() with base 0: '01':
Exception occurred: Error: invalid literal for int() with base 0: '01' (<class 'ValueError'>)
For more info invoke `set exception-verbose on` and rerun the command
or debug it by yourself with `set exception-debugger on`
Python Exception <class 'ValueError'> invalid literal for int() with base 0: '01':
pwndbg> set exception-debugger on
Set whether to debug exceptions raised in Pwndbg commands to True
Traceback (most recent call last):
File "/home/user/pwndbg/pwndbg/pwndbg/events.py", line 165, in caller
func()
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 79, in update
page = pwndbg.memory.Page(start, stop-start, 6 if not is_executable() else 7, 0, '[stack]')
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 127, in is_executable
ehdr = pwndbg.elf.exe()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 180, in exe
e = entry()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 191, in entry
entry = pwndbg.auxv.get().AT_ENTRY
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 106, in get
return use_info_auxv() or walk_stack() or AUXV()
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 121, in use_info_auxv
const, value = int(match.group(1)), int(match.group(2), 0)
ValueError: invalid literal for int() with base 0: '01'
If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues
(Please don't forget to search if it hasn't been reported before)
To generate the report and open a browser, you may run `bugreport --run-browser`
PS: Pull requests are welcome
> /home/user/pwndbg/pwndbg/pwndbg/auxv.py(121)use_info_auxv()
-> const, value = int(match.group(1)), int(match.group(2), 0)
(Pdb) q
Traceback (most recent call last):
File "/home/user/pwndbg/pwndbg/pwndbg/prompt.py", line 33, in prompt_hook
pwndbg.events.after_reload(start=False)
File "/home/user/pwndbg/pwndbg/pwndbg/events.py", line 216, in after_reload
f()
File "/home/user/pwndbg/pwndbg/pwndbg/events.py", line 169, in caller
raise e
File "/home/user/pwndbg/pwndbg/pwndbg/events.py", line 165, in caller
func()
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 79, in update
page = pwndbg.memory.Page(start, stop-start, 6 if not is_executable() else 7, 0, '[stack]')
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 127, in is_executable
ehdr = pwndbg.elf.exe()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 180, in exe
e = entry()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 191, in entry
entry = pwndbg.auxv.get().AT_ENTRY
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 106, in get
return use_info_auxv() or walk_stack() or AUXV()
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 121, in use_info_auxv
const, value = int(match.group(1)), int(match.group(2), 0)
ValueError: invalid literal for int() with base 0: '01'
pwndbg> info auxv
33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7fce000
16 AT_HWCAP Machine-dependent CPU capability hints 0xbfebfbff
6 AT_PAGESZ System page size 4096
17 AT_CLKTCK Frequency of times() 100
3 AT_PHDR Program headers for program 0x555555554040
4 AT_PHENT Size of program header entry 56
5 AT_PHNUM Number of program headers 13
7 AT_BASE Base address of interpreter 0x7ffff7fcf000
8 AT_FLAGS Flags 0x0
9 AT_ENTRY Entry point of program 0x555555555080
11 AT_UID Real user ID 1000
12 AT_EUID Effective user ID 1000
13 AT_GID Real group ID 1001
14 AT_EGID Effective group ID 1001
23 AT_SECURE Boolean, was exec setuid-like? 0
25 AT_RANDOM Address of 16 random bytes 0x7fffffffdff9
26 AT_HWCAP2 Extension of AT_HWCAP 0x0
31 AT_EXECFN File name of executable 0x7fffffffefde "/home/user/test/x 01/test"
15 AT_PLATFORM String identifying platform 0x7fffffffe009 "x86_64"
0 AT_NULL End of vector 0x0
Traceback (most recent call last):
File "/home/user/pwndbg/pwndbg/pwndbg/events.py", line 165, in caller
func()
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 79, in update
page = pwndbg.memory.Page(start, stop-start, 6 if not is_executable() else 7, 0, '[stack]')
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/stack.py", line 127, in is_executable
ehdr = pwndbg.elf.exe()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 180, in exe
e = entry()
File "/home/user/pwndbg/pwndbg/pwndbg/proc.py", line 71, in wrapper
return func(*a, **kw)
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/elf.py", line 191, in entry
entry = pwndbg.auxv.get().AT_ENTRY
File "/home/user/pwndbg/pwndbg/pwndbg/memoize.py", line 44, in __call__
value = self.func(*args, **kwargs)
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 106, in get
return use_info_auxv() or walk_stack() or AUXV()
File "/home/user/pwndbg/pwndbg/pwndbg/auxv.py", line 121, in use_info_auxv
const, value = int(match.group(1)), int(match.group(2), 0)
ValueError: invalid literal for int() with base 0: '01'
> /home/user/pwndbg/pwndbg/pwndbg/auxv.py(121)use_info_auxv()
-> const, value = int(match.group(1)), int(match.group(2), 0)
(Pdb)
```
### My setup
Platform: Linux-5.4.0-53-generic-x86_64-with-glibc2.29
Gdb: 9.2
Python: 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0]
Pwndbg: 1.1.0 build: 29f962c
Capstone: 4.0.1024
Unicorn: 1.0.2
This GDB was configured as follows:
configure --host=x86_64-linux-gnu --target=x86_64-linux-gnu
--with-auto-load-dir=$debugdir:$datadir/auto-load
--with-auto-load-safe-path=$debugdir:$datadir/auto-load
--with-expat
--with-gdb-datadir=/usr/share/gdb (relocatable)
--with-jit-reader-dir=/usr/lib/gdb (relocatable)
--without-libunwind-ia64
--with-lzma
--with-babeltrace
--without-intel-pt
--with-mpfr
--without-xxhash
--with-python=/usr (relocatable)
--without-guile
--disable-source-highlight
--with-separate-debug-dir=/usr/lib/debug (relocatable)
--with-system-gdbinit=/etc/gdb/gdbinit
("Relocatable" means the directory can be moved with the GDB installation
tree, and GDB will still find it.)
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\n\nimport gdb\n\nimport pwndbg.abi\nimport pwndbg.arch\nimport pwndbg.events\nimport pwndbg.info\nimport pwndbg.memory\nimport pwndbg.qemu\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\n\nexample_info_auxv_linux = \"\"\"\n33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffa000\n16 AT_HWCAP Machine-dependent CPU capability hints 0xfabfbff\n6 AT_PAGESZ System page size 4096\n17 AT_CLKTCK Frequency of times() 100\n3 AT_PHDR Program headers for program 0x400040\n4 AT_PHENT Size of program header entry 56\n5 AT_PHNUM Number of program headers 9\n7 AT_BASE Base address of interpreter 0x7ffff7dda000\n8 AT_FLAGS Flags 0x0\n9 AT_ENTRY Entry point of program 0x42020b\n11 AT_UID Real user ID 1000\n12 AT_EUID Effective user ID 1000\n13 AT_GID Real group ID 1000\n14 AT_EGID Effective group ID 1000\n23 AT_SECURE Boolean, was exec setuid-like? 0\n25 AT_RANDOM Address of 16 random bytes 0x7fffffffdb39\n31 AT_EXECFN File name of executable 0x7fffffffefee \"/bin/bash\"\n15 AT_PLATFORM String identifying platform 0x7fffffffdb49 \"x86_64\"\n0 AT_NULL End of vector 0x0\n\"\"\"\n\n\nAT_CONSTANTS = {\n 0 : 'AT_NULL', # /* End of vector */\n 1 : 'AT_IGNORE', # /* Entry should be ignored */\n 2 : 'AT_EXECFD', # /* File descriptor of program */\n 3 : 'AT_PHDR', # /* Program headers for program */\n 4 : 'AT_PHENT', # /* Size of program header entry */\n 5 : 'AT_PHNUM', # /* Number of program headers */\n 6 : 'AT_PAGESZ', # /* System page size */\n 7 : 'AT_BASE', # /* Base address of interpreter */\n 8 : 'AT_FLAGS', # /* Flags */\n 9 : 'AT_ENTRY', # /* Entry point of program */\n 10: 'AT_NOTELF', # /* Program is not ELF */\n 11: 'AT_UID', # /* Real uid */\n 12: 'AT_EUID', # /* Effective uid */\n 13: 'AT_GID', # /* Real gid */\n 14: 'AT_EGID', # /* Effective gid */\n 15: 'AT_PLATFORM', # /* String identifying platform */\n 16: 'AT_HWCAP', # /* Machine dependent hints about processor capabilities */\n 17: 'AT_CLKTCK', # /* Frequency of times() */\n 18: 'AT_FPUCW',\n 19: 'AT_DCACHEBSIZE',\n 20: 'AT_ICACHEBSIZE',\n 21: 'AT_UCACHEBSIZE',\n 22: 'AT_IGNOREPPC',\n 23: 'AT_SECURE',\n 24: 'AT_BASE_PLATFORM', # String identifying real platforms\n 25: 'AT_RANDOM', # Address of 16 random bytes\n 31: 'AT_EXECFN', # Filename of executable\n 32: 'AT_SYSINFO',\n 33: 'AT_SYSINFO_EHDR',\n 34: 'AT_L1I_CACHESHAPE',\n 35: 'AT_L1D_CACHESHAPE',\n 36: 'AT_L2_CACHESHAPE',\n 37: 'AT_L3_CACHESHAPE',\n}\n\nsys.modules[__name__].__dict__.update({v:k for k,v in AT_CONSTANTS.items()})\n\n\n\nclass AUXV(dict):\n def __init__(self):\n for field in AT_CONSTANTS.values():\n self[field] = None\n def set(self, const, value):\n name = AT_CONSTANTS.get(const, \"AT_UNKNOWN%i\" % const)\n\n if name in ['AT_EXECFN', 'AT_PLATFORM']:\n try:\n value = gdb.Value(value)\n value = value.cast(pwndbg.typeinfo.pchar)\n value = value.string()\n except:\n value = 'couldnt read AUXV!'\n\n self[name] = value\n def __getattr__(self, attr):\n return self[attr]\n def __str__(self):\n return str({k:v for k,v in self.items() if v is not None})\n\[email protected]_on_objfile\ndef get():\n return use_info_auxv() or walk_stack() or AUXV()\n\ndef use_info_auxv():\n lines = pwndbg.info.auxv().splitlines()\n\n if not lines:\n return None\n\n auxv = AUXV()\n for line in lines:\n match = re.match('([0-9]+) .* (0x[0-9a-f]+|[0-9]+)', line)\n if not match:\n print(\"Warning: Skipping auxv entry '{}'\".format(line))\n continue\n\n const, value = int(match.group(1)), 
int(match.group(2), 0)\n auxv.set(const, value)\n\n return auxv\n\n\ndef find_stack_boundary(addr):\n # For real binaries, we can just use pwndbg.memory.find_upper_boundary\n # to search forward until we walk off the end of the stack.\n #\n # Unfortunately, qemu-user emulation likes to paste the stack right\n # before binaries in memory. This means that we walk right past the\n # stack and to the end of some random ELF.\n #\n # In order to mitigate this, we search page-by-page until either:\n #\n # 1) We get a page fault, and stop\n # 2) We find an ELF header, and stop\n addr = pwndbg.memory.page_align(int(addr))\n try:\n while True:\n if b'\\x7fELF' == pwndbg.memory.read(addr, 4):\n break\n addr += pwndbg.memory.PAGE_SIZE\n except gdb.MemoryError:\n pass\n return addr\n\ndef walk_stack():\n if not pwndbg.abi.linux:\n return None\n if pwndbg.qemu.is_qemu_kernel():\n return None\n\n auxv = walk_stack2(0)\n\n if not auxv:\n # For whatever reason, sometimes the ARM AUXV under qemu-user is\n # not aligned properly.\n auxv = walk_stack2(1)\n\n if not auxv.get('AT_EXECFN', None):\n try:\n auxv['AT_EXECFN'] = _get_execfn()\n except gdb.MemoryError:\n pass\n\n return auxv\n\ndef walk_stack2(offset=0):\n sp = pwndbg.regs.sp\n\n if not sp:\n return AUXV()\n\n #\n # Strategy looks like this:\n #\n # 1) Find the end of the stack.\n # 2) Scan backward from the end of the stack until we find what\n # could be an AT_NULL entry (two consecutive ULONGs)\n # 3) Scan back a little further until we find what could be an\n # AT_ENTRY entry.\n # 4) Keep scanning back until we find something that isn't in the\n # set of known AT_ enums.\n # 5) Vacuum up between the two.\n #\n end = find_stack_boundary(sp)\n p = gdb.Value(end).cast(pwndbg.typeinfo.ulong.pointer())\n\n p -= offset\n\n # So we don't walk off the end of the stack\n p -= 2\n\n # Find a ~guess at where AT_NULL is.\n #\n # Coming up from the end of the stack, there will be a\n # marker at the end which is a single ULONG of zeroes, and then\n # the ARGV and ENVP data.\n #\n # Assuming that the ARGV and ENVP data is formed normally,\n # (i.e. doesn't include 8-16 consecutive zero-length args)\n # this should land us at the *END* of AUXV, which is the\n # AT_NULL vector.\n while p.dereference() != 0 or (p+1).dereference() != 0:\n p -= 2\n\n # Now we want to continue until we fine, at a minumum, AT_BASE.\n # While there's no guarantee that this exists, I've not ever found\n # an instance when it doesn't.\n #\n # This check is needed because the above loop isn't\n # guaranteed to actually get us to AT_NULL, just to some\n # consecutive NULLs. 
QEMU is pretty generous with NULLs.\n for i in range(1024):\n if p.dereference() == AT_BASE:\n break\n p -= 2\n else:\n return AUXV()\n\n # If we continue to p back, we should bump into the\n # very end of ENVP (and perhaps ARGV if ENVP is empty).\n #\n # The highest value for the vector is AT_SYSINFO_EHDR, 33.\n while (p-2).dereference() < 37:\n p -= 2\n\n # Scan them into our structure\n auxv = AUXV()\n while True:\n const = int((p+0).dereference()) & pwndbg.arch.ptrmask\n value = int((p+1).dereference()) & pwndbg.arch.ptrmask\n\n if const == AT_NULL:\n break\n\n auxv.set(const, value)\n p += 2\n\n return auxv\n\ndef _get_execfn():\n # If the stack is not sane, this won't work\n if not pwndbg.memory.peek(pwndbg.regs.sp):\n return\n\n # QEMU does not put AT_EXECFN in the Auxiliary Vector\n # on the stack.\n #\n # However, it does put it at the very top of the stack.\n #\n # 32c:1960| 0x7fffffffefe0 <-- '/home/user/pwndbg/ld....'\n # 32d:1968| 0x7fffffffefe8 <-- 'er/pwndbg/ld.so'\n # 32e:1970| 0x7fffffffeff0 <-- 0x6f732e646c2f67 /* 'g/ld.so' */\n # 32f:1978| 0x7fffffffeff8 <-- 0\n # 330:1980| 0x7ffffffff000\n addr = pwndbg.stack.find_upper_stack_boundary(pwndbg.regs.sp)\n\n while pwndbg.memory.byte(addr-1) == 0:\n addr -= 1\n\n while pwndbg.memory.byte(addr-1) != 0:\n addr -= 1\n\n v = pwndbg.strings.get(addr, 1024)\n if v:\n return os.path.abspath(v)\n",
"path": "pwndbg/auxv.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\n\nimport gdb\n\nimport pwndbg.abi\nimport pwndbg.arch\nimport pwndbg.events\nimport pwndbg.info\nimport pwndbg.memory\nimport pwndbg.qemu\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\n\nexample_info_auxv_linux = \"\"\"\n33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffa000\n16 AT_HWCAP Machine-dependent CPU capability hints 0xfabfbff\n6 AT_PAGESZ System page size 4096\n17 AT_CLKTCK Frequency of times() 100\n3 AT_PHDR Program headers for program 0x400040\n4 AT_PHENT Size of program header entry 56\n5 AT_PHNUM Number of program headers 9\n7 AT_BASE Base address of interpreter 0x7ffff7dda000\n8 AT_FLAGS Flags 0x0\n9 AT_ENTRY Entry point of program 0x42020b\n11 AT_UID Real user ID 1000\n12 AT_EUID Effective user ID 1000\n13 AT_GID Real group ID 1000\n14 AT_EGID Effective group ID 1000\n23 AT_SECURE Boolean, was exec setuid-like? 0\n25 AT_RANDOM Address of 16 random bytes 0x7fffffffdb39\n31 AT_EXECFN File name of executable 0x7fffffffefee \"/bin/bash\"\n15 AT_PLATFORM String identifying platform 0x7fffffffdb49 \"x86_64\"\n0 AT_NULL End of vector 0x0\n\"\"\"\n\n\nAT_CONSTANTS = {\n 0 : 'AT_NULL', # /* End of vector */\n 1 : 'AT_IGNORE', # /* Entry should be ignored */\n 2 : 'AT_EXECFD', # /* File descriptor of program */\n 3 : 'AT_PHDR', # /* Program headers for program */\n 4 : 'AT_PHENT', # /* Size of program header entry */\n 5 : 'AT_PHNUM', # /* Number of program headers */\n 6 : 'AT_PAGESZ', # /* System page size */\n 7 : 'AT_BASE', # /* Base address of interpreter */\n 8 : 'AT_FLAGS', # /* Flags */\n 9 : 'AT_ENTRY', # /* Entry point of program */\n 10: 'AT_NOTELF', # /* Program is not ELF */\n 11: 'AT_UID', # /* Real uid */\n 12: 'AT_EUID', # /* Effective uid */\n 13: 'AT_GID', # /* Real gid */\n 14: 'AT_EGID', # /* Effective gid */\n 15: 'AT_PLATFORM', # /* String identifying platform */\n 16: 'AT_HWCAP', # /* Machine dependent hints about processor capabilities */\n 17: 'AT_CLKTCK', # /* Frequency of times() */\n 18: 'AT_FPUCW',\n 19: 'AT_DCACHEBSIZE',\n 20: 'AT_ICACHEBSIZE',\n 21: 'AT_UCACHEBSIZE',\n 22: 'AT_IGNOREPPC',\n 23: 'AT_SECURE',\n 24: 'AT_BASE_PLATFORM', # String identifying real platforms\n 25: 'AT_RANDOM', # Address of 16 random bytes\n 31: 'AT_EXECFN', # Filename of executable\n 32: 'AT_SYSINFO',\n 33: 'AT_SYSINFO_EHDR',\n 34: 'AT_L1I_CACHESHAPE',\n 35: 'AT_L1D_CACHESHAPE',\n 36: 'AT_L2_CACHESHAPE',\n 37: 'AT_L3_CACHESHAPE',\n}\n\nsys.modules[__name__].__dict__.update({v:k for k,v in AT_CONSTANTS.items()})\n\n\n\nclass AUXV(dict):\n def __init__(self):\n for field in AT_CONSTANTS.values():\n self[field] = None\n def set(self, const, value):\n name = AT_CONSTANTS.get(const, \"AT_UNKNOWN%i\" % const)\n\n if name in ['AT_EXECFN', 'AT_PLATFORM']:\n try:\n value = gdb.Value(value)\n value = value.cast(pwndbg.typeinfo.pchar)\n value = value.string()\n except:\n value = 'couldnt read AUXV!'\n\n self[name] = value\n def __getattr__(self, attr):\n return self[attr]\n def __str__(self):\n return str({k:v for k,v in self.items() if v is not None})\n\[email protected]_on_objfile\ndef get():\n return use_info_auxv() or walk_stack() or AUXV()\n\ndef use_info_auxv():\n lines = pwndbg.info.auxv().splitlines()\n\n if not lines:\n return None\n\n auxv = AUXV()\n for line in lines:\n match = re.match('([0-9]+) .*? 
(0x[0-9a-f]+|[0-9]+)', line)\n if not match:\n print(\"Warning: Skipping auxv entry '{}'\".format(line))\n continue\n\n const, value = int(match.group(1)), int(match.group(2), 0)\n auxv.set(const, value)\n\n return auxv\n\n\ndef find_stack_boundary(addr):\n # For real binaries, we can just use pwndbg.memory.find_upper_boundary\n # to search forward until we walk off the end of the stack.\n #\n # Unfortunately, qemu-user emulation likes to paste the stack right\n # before binaries in memory. This means that we walk right past the\n # stack and to the end of some random ELF.\n #\n # In order to mitigate this, we search page-by-page until either:\n #\n # 1) We get a page fault, and stop\n # 2) We find an ELF header, and stop\n addr = pwndbg.memory.page_align(int(addr))\n try:\n while True:\n if b'\\x7fELF' == pwndbg.memory.read(addr, 4):\n break\n addr += pwndbg.memory.PAGE_SIZE\n except gdb.MemoryError:\n pass\n return addr\n\ndef walk_stack():\n if not pwndbg.abi.linux:\n return None\n if pwndbg.qemu.is_qemu_kernel():\n return None\n\n auxv = walk_stack2(0)\n\n if not auxv:\n # For whatever reason, sometimes the ARM AUXV under qemu-user is\n # not aligned properly.\n auxv = walk_stack2(1)\n\n if not auxv.get('AT_EXECFN', None):\n try:\n auxv['AT_EXECFN'] = _get_execfn()\n except gdb.MemoryError:\n pass\n\n return auxv\n\ndef walk_stack2(offset=0):\n sp = pwndbg.regs.sp\n\n if not sp:\n return AUXV()\n\n #\n # Strategy looks like this:\n #\n # 1) Find the end of the stack.\n # 2) Scan backward from the end of the stack until we find what\n # could be an AT_NULL entry (two consecutive ULONGs)\n # 3) Scan back a little further until we find what could be an\n # AT_ENTRY entry.\n # 4) Keep scanning back until we find something that isn't in the\n # set of known AT_ enums.\n # 5) Vacuum up between the two.\n #\n end = find_stack_boundary(sp)\n p = gdb.Value(end).cast(pwndbg.typeinfo.ulong.pointer())\n\n p -= offset\n\n # So we don't walk off the end of the stack\n p -= 2\n\n # Find a ~guess at where AT_NULL is.\n #\n # Coming up from the end of the stack, there will be a\n # marker at the end which is a single ULONG of zeroes, and then\n # the ARGV and ENVP data.\n #\n # Assuming that the ARGV and ENVP data is formed normally,\n # (i.e. doesn't include 8-16 consecutive zero-length args)\n # this should land us at the *END* of AUXV, which is the\n # AT_NULL vector.\n while p.dereference() != 0 or (p+1).dereference() != 0:\n p -= 2\n\n # Now we want to continue until we fine, at a minumum, AT_BASE.\n # While there's no guarantee that this exists, I've not ever found\n # an instance when it doesn't.\n #\n # This check is needed because the above loop isn't\n # guaranteed to actually get us to AT_NULL, just to some\n # consecutive NULLs. 
QEMU is pretty generous with NULLs.\n for i in range(1024):\n if p.dereference() == AT_BASE:\n break\n p -= 2\n else:\n return AUXV()\n\n # If we continue to p back, we should bump into the\n # very end of ENVP (and perhaps ARGV if ENVP is empty).\n #\n # The highest value for the vector is AT_SYSINFO_EHDR, 33.\n while (p-2).dereference() < 37:\n p -= 2\n\n # Scan them into our structure\n auxv = AUXV()\n while True:\n const = int((p+0).dereference()) & pwndbg.arch.ptrmask\n value = int((p+1).dereference()) & pwndbg.arch.ptrmask\n\n if const == AT_NULL:\n break\n\n auxv.set(const, value)\n p += 2\n\n return auxv\n\ndef _get_execfn():\n # If the stack is not sane, this won't work\n if not pwndbg.memory.peek(pwndbg.regs.sp):\n return\n\n # QEMU does not put AT_EXECFN in the Auxiliary Vector\n # on the stack.\n #\n # However, it does put it at the very top of the stack.\n #\n # 32c:1960| 0x7fffffffefe0 <-- '/home/user/pwndbg/ld....'\n # 32d:1968| 0x7fffffffefe8 <-- 'er/pwndbg/ld.so'\n # 32e:1970| 0x7fffffffeff0 <-- 0x6f732e646c2f67 /* 'g/ld.so' */\n # 32f:1978| 0x7fffffffeff8 <-- 0\n # 330:1980| 0x7ffffffff000\n addr = pwndbg.stack.find_upper_stack_boundary(pwndbg.regs.sp)\n\n while pwndbg.memory.byte(addr-1) == 0:\n addr -= 1\n\n while pwndbg.memory.byte(addr-1) != 0:\n addr -= 1\n\n v = pwndbg.strings.get(addr, 1024)\n if v:\n return os.path.abspath(v)\n",
"path": "pwndbg/auxv.py"
}
] | diff --git a/pwndbg/auxv.py b/pwndbg/auxv.py
index 17e28b18f23..517c77ddd8b 100644
--- a/pwndbg/auxv.py
+++ b/pwndbg/auxv.py
@@ -113,7 +113,7 @@ def use_info_auxv():
auxv = AUXV()
for line in lines:
- match = re.match('([0-9]+) .* (0x[0-9a-f]+|[0-9]+)', line)
+ match = re.match('([0-9]+) .*? (0x[0-9a-f]+|[0-9]+)', line)
if not match:
print("Warning: Skipping auxv entry '{}'".format(line))
continue
|
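
The pr_diff above is a one-character change in `use_info_auxv`: the greedy `.*` between the entry number and the value becomes a non-greedy `.*?`. As a hedged, standalone illustration (not part of the dataset row), the sketch below applies the patched pattern to a line modeled on the `example_info_auxv_linux` text embedded in `pwndbg/auxv.py`; the sample line and the printed result are only what this snippet produces on its own, not a claim about pwndbg's behaviour inside gdb.

```python
import re

# Sample line modeled on the example_info_auxv_linux text in pwndbg/auxv.py.
line = "3          AT_PHDR              Program headers for program     0x400040"

# Patched pattern from the diff: the lazy ".*?" skips the AT_* name and the
# description, then the second group captures the first hex or decimal value
# that follows a space.
match = re.match(r'([0-9]+) .*? (0x[0-9a-f]+|[0-9]+)', line)
if match:
    const, value = int(match.group(1)), int(match.group(2), 0)
    print(const, hex(value))  # prints: 3 0x400040
```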