ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 7dfb8906618ef3c771e4700e5e33ab56eb41ae7d | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import Literal
from synapse.api.constants import EduTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_string_from_args,
parse_strings_from_args,
)
from synapse.types import JsonDict
from synapse.util.ratelimitutils import FederationRateLimiter
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
issue_8631_logger = logging.getLogger("synapse.8631_debug")
class BaseFederationServerServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a federation server handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_federation_server()
class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
RATELIMIT = False
# This is when someone is trying to send us a bunch of data.
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
transaction_id: str,
) -> Tuple[int, JsonDict]:
"""Called on PUT /send/<transaction_id>/
Args:
transaction_id: The transaction_id associated with this request. This
is *not* None.
Returns:
Tuple of `(code, response)`, where
`response` is a python dict to be converted into JSON that is
used as the response body.
"""
# Parse the request
try:
transaction_data = content
logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))
logger.info(
"Received txn %s from %s. (PDUs: %d, EDUs: %d)",
transaction_id,
origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
)
if issue_8631_logger.isEnabledFor(logging.DEBUG):
DEVICE_UPDATE_EDUS = [
EduTypes.DEVICE_LIST_UPDATE,
EduTypes.SIGNING_KEY_UPDATE,
]
device_list_updates = [
edu.get("content", {})
for edu in transaction_data.get("edus", [])
if edu.get("edu_type") in DEVICE_UPDATE_EDUS
]
if device_list_updates:
issue_8631_logger.debug(
"received transaction [%s] including device list updates: %s",
transaction_id,
device_list_updates,
)
except Exception as e:
logger.exception(e)
return 400, {"error": "Invalid transaction"}
code, response = await self.handler.on_incoming_transaction(
origin, transaction_id, self.server_name, transaction_data
)
return code, response
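# Hedged illustration (not part of the Synapse source) of the transaction body shape
# that on_PUT above reads: the handler only looks at the "pdus" and "edus" lists and,
# for the issue-8631 debug logging, at each EDU's "edu_type" and "content" fields.
# The field values below are placeholders, e.g. an EDU type such as "m.device_list_update".
#
#   {
#       "pdus": [ { ...event dicts... } ],
#       "edus": [ {"edu_type": "m.device_list_update", "content": { ... }} ]
#   }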
class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
event_id: str,
) -> Tuple[int, Union[JsonDict, str]]:
return await self.handler.on_pdu_request(origin, event_id)
class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
# This is when someone asks for all data for a given room.
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_room_state_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=True),
)
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_state_ids_request(
origin,
room_id,
parse_string_from_args(query, "event_id", None, required=True),
)
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
versions = [x.decode("ascii") for x in query[b"v"]]
limit = parse_integer_from_args(query, "limit", None)
if not limit:
return 400, {"error": "Did not include limit param"}
return await self.handler.on_backfill_request(origin, room_id, versions, limit)
class FederationTimestampLookupServlet(BaseFederationServerServlet):
"""
API endpoint to fetch the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query
parameter).
Useful for other homeservers when they're unable to find an event locally.
`ts` is a timestamp in milliseconds where we will find the closest event in
the given direction.
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.
GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
timestamp = parse_integer_from_args(query, "ts", required=True)
direction = parse_string_from_args(
query, "dir", default="f", allowed_values=["f", "b"], required=True
)
return await self.handler.on_timestamp_to_event_request(
origin, room_id, timestamp, direction
)
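# Hedged request/response example for the unstable MSC3030 endpoint documented above
# (server name, room id and event id are placeholders; only the "event_id" field is
# documented in the docstring):
#
#   GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/!room:example.org?ts=1650000000000&dir=f
#   => {"event_id": "$someevent"}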
class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
# This is when we receive a server-server Query
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
query_type: str,
) -> Tuple[int, JsonDict]:
args = {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}
args["origin"] = origin
return await self.handler.on_query_request(query_type, args)
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
"""
Args:
origin: The authenticated server_name of the calling server
content: (GETs don't have bodies)
query: Query params from the request.
**kwargs: the dict mapping keys to path components as specified in
the path match regexp.
Returns:
Tuple of (response code, response object)
"""
supported_versions = parse_strings_from_args(query, "ver", encoding="utf-8")
if supported_versions is None:
supported_versions = ["1"]
result = await self.handler.on_make_join_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_make_leave_request(origin, room_id, user_id)
return 200, result
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_leave_request(origin, content, room_id)
return 200, result
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
# Retrieve the room versions the remote homeserver claims to support
supported_versions = parse_strings_from_args(
query, "ver", required=True, encoding="utf-8"
)
result = await self.handler.on_make_knock_request(
origin, room_id, user_id, supported_versions=supported_versions
)
return 200, result
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
result = await self.handler.on_send_knock_request(origin, content, room_id)
return 200, result
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_event_auth(origin, room_id, event_id)
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# TODO(paul): assert that event_id parsed from path actually
# match those given in content
result = await self.handler.on_send_join_request(origin, content, room_id)
return 200, (200, result)
class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._msc3706_enabled = hs.config.experimental.msc3706_enabled
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that event_id parsed from path actually
# match those given in content
partial_state = False
if self._msc3706_enabled:
partial_state = parse_boolean_from_args(
query, "org.matrix.msc3706.partial_state", default=False
)
result = await self.handler.on_send_join_request(
origin, content, room_id, caller_supports_partial_state=partial_state
)
return 200, result
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, Tuple[int, JsonDict]]:
# We don't get a room version, so we have to assume its EITHER v1 or
# v2. This is "fine" as the only difference between V1 and V2 is the
# state resolution algorithm, and we don't use that for processing
# invites
result = await self.handler.on_invite_request(
origin, content, room_version_id=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
# due to a historical bug.
return 200, (200, result)
class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
PREFIX = FEDERATION_V2_PREFIX
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
event_id: str,
) -> Tuple[int, JsonDict]:
# TODO(paul): assert that room_id/event_id parsed from path actually
# match those given in content
room_version = content["room_version"]
event = content["event"]
invite_room_state = content["invite_room_state"]
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
return 200, result
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
await self.handler.on_exchange_third_party_invite_request(content)
return 200, {}
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_client_keys(origin, content)
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
user_id: str,
) -> Tuple[int, JsonDict]:
return await self.handler.on_query_user_devices(origin, user_id)
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
response = await self.handler.on_claim_client_keys(origin, content)
return 200, response
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
# TODO(paul): Why does this path alone end with "/?" optional?
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
limit = int(content.get("limit", 10))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
result = await self.handler.on_get_missing_events(
origin,
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
)
return 200, result
class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
REQUIRE_AUTH = False
async def on_POST(
self, origin: Optional[str], content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
if "invites" in content:
last_exception = None
for invite in content["invites"]:
try:
if "signed" not in invite or "token" not in invite["signed"]:
message = (
"Rejecting received notification of third-"
"party invite without signed: %s" % (invite,)
)
logger.info(message)
raise SynapseError(400, message)
await self.handler.exchange_third_party_invite(
invite["sender"],
invite["mxid"],
invite["room_id"],
invite["signed"],
)
except Exception as e:
last_exception = e
if last_exception:
raise last_exception
return 200, {}
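# Hedged illustration (field values are placeholders) of the request body shape that
# On3pidBindServlet.on_POST reads above: each entry in "invites" must carry a "signed"
# block containing a "token", plus the "sender", "mxid" and "room_id" fields.
#
#   {
#       "invites": [
#           {
#               "sender": "@alice:example.org",
#               "mxid": "@bob:example.org",
#               "room_id": "!room:example.org",
#               "signed": {"token": "sometoken", ...}
#           }
#       ]
#   }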
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
REQUIRE_AUTH = False
async def on_GET(
self,
origin: Optional[str],
content: Literal[None],
query: Dict[bytes, List[bytes]],
) -> Tuple[int, JsonDict]:
return (
200,
{
"server": {
"name": "Synapse",
"version": get_distribution_version_string("matrix-synapse"),
}
},
)
class FederationRoomHierarchyServlet(BaseFederationServlet):
PATH = "/hierarchy/(?P<room_id>[^/]*)"
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_room_summary_handler()
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
return 200, await self.handler.get_federation_hierarchy(
origin, room_id, suggested_only
)
class RoomComplexityServlet(BaseFederationServlet):
"""
Indicates to other servers how complex (and therefore likely
resource-intensive) a public room this server knows about is.
"""
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._store = self.hs.get_datastores().main
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
is_public = await self._store.is_room_world_readable_or_publicly_joinable(
room_id
)
if not is_public:
raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
complexity = await self._store.get_room_complexity(room_id)
return 200, complexity
class FederationAccountStatusServlet(BaseFederationServerServlet):
PATH = "/query/account_status"
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3720"
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self._account_handler = hs.get_account_handler()
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Mapping[bytes, Sequence[bytes]],
room_id: str,
) -> Tuple[int, JsonDict]:
if "user_ids" not in content:
raise SynapseError(
400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
)
statuses, failures = await self._account_handler.get_account_statuses(
content["user_ids"],
allow_remote=False,
)
return 200, {"account_statuses": statuses, "failures": failures}
FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationSendServlet,
FederationEventServlet,
FederationStateV1Servlet,
FederationStateIdsServlet,
FederationBackfillServlet,
FederationTimestampLookupServlet,
FederationQueryServlet,
FederationMakeJoinServlet,
FederationMakeLeaveServlet,
FederationEventServlet,
FederationV1SendJoinServlet,
FederationV2SendJoinServlet,
FederationV1SendLeaveServlet,
FederationV2SendLeaveServlet,
FederationV1InviteServlet,
FederationV2InviteServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,
FederationClientKeysQueryServlet,
FederationUserDevicesQueryServlet,
FederationClientKeysClaimServlet,
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,
RoomComplexityServlet,
FederationRoomHierarchyServlet,
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,
)
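# Hedged usage sketch (not part of this module): every class above takes the
# (hs, authenticator, ratelimiter, server_name) constructor arguments shown in
# BaseFederationServerServlet.__init__; the register() call below is an assumption
# about BaseFederationServlet's interface rather than something defined in this file.
#
#   for servlet_class in FEDERATION_SERVLET_CLASSES:
#       servlet_class(hs, authenticator, ratelimiter, server_name).register(resource)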
|
py | 7dfb899927d1b156e83454569eb917abc7141240 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .availability_set import *
from .dedicated_host import *
from .dedicated_host_group import *
from .disk import *
from .gallery import *
from .gallery_application import *
from .gallery_application_version import *
from .gallery_image import *
from .gallery_image_version import *
from .get_availability_set import *
from .get_dedicated_host import *
from .get_dedicated_host_group import *
from .get_disk import *
from .get_gallery import *
from .get_gallery_application import *
from .get_gallery_application_version import *
from .get_gallery_image import *
from .get_gallery_image_version import *
from .get_image import *
from .get_log_analytic_export_request_rate_by_interval import *
from .get_log_analytic_export_throttled_requests import *
from .get_proximity_placement_group import *
from .get_snapshot import *
from .get_virtual_machine import *
from .get_virtual_machine_extension import *
from .get_virtual_machine_scale_set import *
from .get_virtual_machine_scale_set_extension import *
from .get_virtual_machine_scale_set_vm import *
from .image import *
from .proximity_placement_group import *
from .snapshot import *
from .virtual_machine import *
from .virtual_machine_extension import *
from .virtual_machine_scale_set import *
from .virtual_machine_scale_set_extension import *
from .virtual_machine_scale_set_vm import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:compute/v20190301:AvailabilitySet":
return AvailabilitySet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:DedicatedHost":
return DedicatedHost(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:DedicatedHostGroup":
return DedicatedHostGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:Disk":
return Disk(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:Gallery":
return Gallery(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:GalleryApplication":
return GalleryApplication(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:GalleryApplicationVersion":
return GalleryApplicationVersion(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:GalleryImage":
return GalleryImage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:GalleryImageVersion":
return GalleryImageVersion(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:Image":
return Image(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:ProximityPlacementGroup":
return ProximityPlacementGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:Snapshot":
return Snapshot(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:VirtualMachine":
return VirtualMachine(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:VirtualMachineExtension":
return VirtualMachineExtension(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:VirtualMachineScaleSet":
return VirtualMachineScaleSet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:VirtualMachineScaleSetExtension":
return VirtualMachineScaleSetExtension(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:compute/v20190301:VirtualMachineScaleSetVM":
return VirtualMachineScaleSetVM(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "compute/v20190301", _module_instance)
_register_module()
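# Descriptive note (added, not generated by Pulumi): _register_module() tells the
# Pulumi runtime how to rehydrate resources of this module from their URNs. For
# example, a state entry whose type token is "azure-native:compute/v20190301:Disk"
# is reconstructed by Module.construct() as Disk(name, pulumi.ResourceOptions(urn=urn)).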
|
py | 7dfb8a57fde259068faed120a3c7a0ec9e8b551c | #!/usr/bin/env python
"""
Copyright (c) 2019 CIIRC, CTU in Prague
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
@author: Zdenek Kasner
"""
import logging
from typing import Any, List
from nltk import ParentedTree
from nlp_crow.modules.CrowModule import CrowModule
import nltk
from nlp_crow.structures.tagging.MorphCategory import POS
from nlp_crow.structures.tagging.ParsedText import ParsedText, TaggedText, ParseTreeNode, TaggedToken
from nlp_crow.structures.tagging.Tag import Tag
#from scripts.test_grammar import get_parse_tree
class NLTK:
def tag(self, text : str) -> TaggedText:
"""
Tags a text.
Parameters
----------
text an input text as string
Returns
-------
a tagged text object
"""
tagged_text = TaggedText()
tokens = nltk.word_tokenize(text)
for pair in nltk.pos_tag(tokens):
tag = Tag()
tag.pos = POS(value=pair[1])
tagged_text.add_tagged_token(token=pair[0], tag=tag)
return tagged_text
class GrammarParser(CrowModule):
"""
Tags and parses the text
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self.nltk_tagger = NLTK()
def parse(self, sentence : str) -> ParsedText:
"""
Currently used for dummy text parsing. After the text is tagged, it is split on "and" and "." tokens
into sentences. Each sentence has its tokens hanged under an "S" node.
TODO: swap with the parse() method which relies on a grammar
Parameters
----------
sentence an input sentence as a string
Returns
-------
parsed text
"""
# use NLTK for tagging
tagged_text = self.nltk_tagger.tag(sentence)
# create a new object for parsed text
parsed_text = ParsedText()
# save the original text
parsed_text.orig_text = sentence
# create the root of the tree
root = ParseTreeNode(label="T")
parsed_text.parse_tree = root
# create a parent node for the first sentence
sentence_node = ParseTreeNode(label="S")
# sequentially process the tagged tokens
for tagged_token in tagged_text.get_tokens_with_tags():
if tagged_token.token in ["and", "."]:
# in case there is a previous sentence
if sentence_node.subnodes:
# append the previous sentence node under the root node
root.subnodes.append(sentence_node)
# and start a new sentence
sentence_node = ParseTreeNode(label="S")
# append the separating token under the root node
root.subnodes.append(tagged_token)
else:
# append the token to the current sentence
sentence_node.subnodes.append(tagged_token)
if sentence_node.subnodes:
# finalize the last sentence
root.subnodes.append(sentence_node)
self.logger.debug(f"Parsed text: {parsed_text}")
return parsed_text
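    # Hedged illustration of parse() above (the sentence is made up): for the input
    # "Pick the cube and put it on the table ." the tagged tokens are hung under two
    # "S" nodes, while the separating tokens "and" and "." are attached directly to
    # the root "T" node:
    #
    #   T
    #   |- S: Pick the cube
    #   |- and
    #   |- S: put it on the table
    #   |- .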
# TODO this method should be used in the future, relies on a grammar
# def parse(self, sentence : str) -> ParsedText:
# tree = get_parse_tree(sentence)
#
# tokens = nltk.word_tokenize(sentence)
#
# root, _ = self.transform_recursive(tree, tokens)
#
# parsed_text = ParsedText()
# parsed_text.orig_text = sentence
# parsed_text.parse_tree = root
#
# self.logger.debug(f"Parsed text: {parsed_text}")
#
# return parsed_text
def transform_recursive(self, node : Any, tokens : List):
"""
Recursively transforms the tree from the format of the grammar parser to the format used in the NL processing.
Parameters
----------
node a node to be processed - can be either a ParentedTree object or a string
(for the first call this should be the tree root)
tokens a list of tokens (not provided in the tree from the grammar parser)
Returns
-------
the recursively transformed node, the list of remaining tokens
"""
if type(node) == ParentedTree:
return self.transform_node(node, tokens)
elif type(node) == str:
return self.transform_tag(node, tokens[0]), tokens[1:]
def transform_node(self, node, tokens):
"""
Transforms a node by recursively calling transform_recursive() on its subnodes.
"""
label = node._label
parse_tree_node = ParseTreeNode(label=label)
for subnode in node:
parse_tree_subnode, tokens = self.transform_recursive(subnode, tokens)
parse_tree_node.subnodes.append(parse_tree_subnode)
return parse_tree_node, tokens
def transform_tag(self, node, token):
"""
Transforms a single token and its tag (in the string form) into a tagged token.
"""
tagged_token = TaggedToken()
tagged_token.token = token
tagged_token.tag = Tag(pos=POS(node))
        return tagged_token
|
py | 7dfb8aaf699a92e25e5a3b9235c1a546070b4a66 | import sys
import os
import cv2
from keras.models import load_model
import numpy as np
from face_classification.src.utils.datasets import get_labels
from face_classification.src.utils.inference import detect_faces
from face_classification.src.utils.inference import draw_text
from face_classification.src.utils.inference import draw_bounding_box
from face_classification.src.utils.inference import apply_offsets
from face_classification.src.utils.inference import load_detection_model
from face_classification.src.utils.inference import load_image
from face_classification.src.utils.preprocessor import preprocess_input
import logging
logger = logging.getLogger('FaceEvaluator')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class FaceEvaluator:
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX
# hyper-parameters for bounding boxes shape
gender_offsets = (30, 60)
gender_offsets = (10, 10)
emotion_offsets = (20, 40)
emotion_offsets = (0, 0)
    @staticmethod
    def load_params(cd):
# parameters for loading data and images
FaceEvaluator.detection_model_path = os.path.join(cd, 'models/face_classification/trained_models/detection_models/haarcascade_frontalface_default.xml')
FaceEvaluator.emotion_model_path = os.path.join(cd, 'models/face_classification/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5')
FaceEvaluator.gender_model_path = os.path.join(cd, 'models/face_classification/trained_models/gender_models/simple_CNN.81-0.96.hdf5')
# loading models
FaceEvaluator.face_detection = load_detection_model(FaceEvaluator.detection_model_path)
FaceEvaluator.emotion_classifier = load_model(FaceEvaluator.emotion_model_path, compile=False)
FaceEvaluator.gender_classifier = load_model(FaceEvaluator.gender_model_path, compile=False)
# getting input model shapes for inference
FaceEvaluator.emotion_target_size = FaceEvaluator.emotion_classifier.input_shape[1:3]
FaceEvaluator.gender_target_size = FaceEvaluator.gender_classifier.input_shape[1:3]
@staticmethod
def describe_face(npimg):
# loading images
rgb_image = cv2.cvtColor(npimg, cv2.COLOR_BGR2RGB)
gray_image = cv2.cvtColor(npimg, cv2.COLOR_BGR2GRAY)
#gray_image = npimg.convert('LA')
#gray_image = np.squeeze(gray_image)
#gray_image = gray_image.astype('uint8')
#cv2.imwrite('c:/users/andre/documents/github/hcEye/test/name.png', gray_image)
faces = detect_faces(FaceEvaluator.face_detection, gray_image)
face_features = []
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, FaceEvaluator.gender_offsets)
rgb_face = rgb_image[y1:y2, x1:x2]
x1, x2, y1, y2 = apply_offsets(face_coordinates, FaceEvaluator.emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
rgb_face = cv2.resize(rgb_face, (FaceEvaluator.gender_target_size))
gray_face = cv2.resize(gray_face, (FaceEvaluator.emotion_target_size))
except:
continue
rgb_face = preprocess_input(rgb_face, False)
rgb_face = np.expand_dims(rgb_face, 0)
#cv2.imwrite('c:/users/andre/documents/github/hcEye/test/face.png', gray_face)
gender_prediction = FaceEvaluator.gender_classifier.predict(rgb_face)
gender_label_arg = np.argmax(gender_prediction)
gender_text = FaceEvaluator.gender_labels[gender_label_arg]
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = FaceEvaluator.emotion_classifier.predict(gray_face)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = FaceEvaluator.emotion_labels[emotion_label_arg]
FaceEvaluator.emotion_probability = np.max(emotion_prediction)
face_features.append([face_coordinates.tolist(), gender_text, emotion_text])
#print(face_features)
return face_features
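# Hedged usage sketch: the project-root path and image path below are placeholders,
# and this assumes the trained model files referenced in load_params() exist on disk.
if __name__ == "__main__":
    FaceEvaluator.load_params(os.path.dirname(os.path.abspath(__file__)))
    frame = cv2.imread(sys.argv[1] if len(sys.argv) > 1 else "sample.jpg")
    for coords, gender, emotion in FaceEvaluator.describe_face(frame):
        logger.info("face at %s: %s, %s", coords, gender, emotion)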
|
py | 7dfb8d8bc089c9016567102746ead1de6d81f6d7 | import numpy as np
import torch, math
import onmt  # needed below for onmt.Constants.PAD in decode()
import torch.nn as nn
from onmt.modules.BaseModel import DecoderState
from onmt.modules.Transformer.Models import TransformerDecodingState
from collections import defaultdict
import torch.nn.functional as F
class FusionNetwork(nn.Module):
"""Main model in 'Attention is all you need' """
def __init__(self, tm_model, lm_model):
super(FusionNetwork, self).__init__()
self.tm_model = tm_model
self.lm_model = lm_model
# freezing the parameters for the language model
for param in self.lm_model.parameters():
param.requires_grad = False
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
nmt_output_dict = self.tm_model(batch)
# no gradient for the LM side
with torch.no_grad():
lm_output_dict = self.lm_model(batch)
output_dict = defaultdict(lambda: None)
output_dict['tm'] = nmt_output_dict
output_dict['lm'] = lm_output_dict
return output_dict
    # a utility function to fuse the two decoder states
    # returns log probabilities
def fuse_states(self, tm_state, lm_state):
# PRENORM algorithm
# (1) generate the log P_lm
with torch.no_grad():
log_lm = self.lm_model.generator[0](lm_state, log_softmax=True)
# (2) generate the logits for tm
tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
# (3) add the bias of lm to the logits
dists = F.log_softmax(tm_logits + log_lm, dim=-1)
# ## POSTNORM
# # (1) generate the P_lm
# with torch.no_grad():
# lm_logits = self.lm_model.generator[0](lm_state, log_softmax=False)
#
# # (2) generate the logits for tm
# tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
#
# dists = F.log_softmax(F.softmax(tm_logits, dim=-1) * F.softmax(lm_logits, dim=-1), dim=-1)
return dists
def renew_buffer(self, new_len):
self.tm_model.decoder.renew_buffer(new_len)
self.lm_model.decoder.renew_buffer(new_len)
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
# (1) we decode using language model
context = self.tm_model.encoder(src)['context']
if (hasattr(self,
'autoencoder') and self.autoencoder and self.autoencoder.representation == "EncoderHiddenState"):
context = self.autoencoder.autocode(context)
decoder_output = self.tm_model.decoder(tgt_input, context, src)['hidden']
output = decoder_output
if (hasattr(self, 'autoencoder')
and self.autoencoder and self.autoencoder.representation == "DecoderHiddenState"):
output = self.autoencoder.autocode(output)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
# (2) decode using the language model
lm_decoder_output = self.lm_model.decoder(tgt_input)['hidden']
for dec_t, lm_t, tgt_t in zip(decoder_output, lm_decoder_output, tgt_output):
# generate the current step distribution from both states
gen_t = self.fuse_states(dec_t, lm_t)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.Constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.Constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object FusionDecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
# (1) decode using the translation model
tm_hidden, coverage = self.tm_model.decoder.step(input_t, decoder_state.tm_state)
# (2) decode using the translation model
        lm_hidden, _ = self.lm_model.decoder.step(input_t, decoder_state.lm_state)
log_prob = self.fuse_states(tm_hidden, lm_hidden)
# log_prob = self.tm_model.generator[0](tm_hidden)
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1):
"""
Generate a new decoder state based on the batch input
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
tm_decoder_state = self.tm_model.create_decoder_state(batch, beam_size=beam_size)
lm_decoder_state = self.lm_model.create_decoder_state(batch, beam_size=beam_size)
decoder_state = FusionDecodingState(tm_decoder_state, lm_decoder_state)
return decoder_state
class FusionDecodingState(DecoderState):
def __init__(self, tm_state, lm_state):
self.tm_state = tm_state
self.lm_state = lm_state
self.original_src = tm_state.original_src
self.beam_size = tm_state.beam_size
def update_beam(self, beam, b, remaining_sents, idx):
self.tm_state.update_beam(beam, b, remaining_sents, idx)
self.lm_state.update_beam(beam, b, remaining_sents, idx)
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
self.tm_state.prune_complete_beam(active_idx, remaining_sents)
self.lm_state.prune_complete_beam(active_idx, remaining_sents)
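# Hedged usage sketch (tm_model, lm_model and batch are assumed to be built elsewhere
# with the interfaces used above): the fusion network freezes the language model and,
# in fuse_states(), adds its log-probabilities to the translation model's logits
# before the final log-softmax (the "prenorm" variant; the postnorm variant is left
# commented out above).
#
#   fusion = FusionNetwork(tm_model, lm_model)
#   output_dict = fusion(batch)            # {'tm': ..., 'lm': ...}
#   gold_words, gold_scores, all_scores = fusion.decode(batch)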
|
py | 7dfb8ef04bd5799ccab905c8102fe422e6e91368 | #!/usr/bin/python
# Given (Q)CNF file, read number of variables, and list them in either forward or reverse order
import sys
def usage(name):
sys.stderr.write("Usage: %s [-r] FILE.(q)cnf\n" % name)
def trim(s):
while len(s) > 0 and s[-1] == '\n':
s = s[:-1]
return s
def error(msg):
sys.stderr.write("ERROR: %s\n" % msg)
sys.exit(1)
def doit(cname, reverse):
nvars = 0
try:
cfile = open(cname, 'r')
except:
error("Couldn't open (Q)CNF file '%s'" % cname)
for line in cfile:
line = trim(line)
fields = line.split()
if len(fields) == 0:
continue
elif fields[0] == 'c':
continue
elif fields[0] == 'p':
try:
nvars = int(fields[2])
except:
error("Couldn't read line '%s'" % line)
break
else:
error("Unrecognized line before header: '%s'" % line)
return
cfile.close()
if nvars == 0:
error("Didn't determine number of variables")
slist = [str(i) for i in range(1,nvars+1)]
if reverse:
slist.reverse()
print(" ".join(slist))
def run(name, args):
reverse = False
index = 0
if len(args) < 1 or len(args) > 2:
usage(name)
return
if args[0][0] == '-':
if args[0][1] == 'r':
reverse = True
index += 1
else:
error("Unrecognized option '%s'" % args[0])
cname = args[index]
doit(cname, reverse)
if __name__ == "__main__":
run(sys.argv[0], sys.argv[1:])
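# Hedged example (the script and file names are placeholders). Given a (Q)CNF file
# whose header line is "p cnf 3 2", the script prints the variables 1..3, reversed
# when -r is given:
#
#   $ python listvars.py small.cnf
#   1 2 3
#   $ python listvars.py -r small.cnf
#   3 2 1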
|
py | 7dfb8f49a087ad5b921a5186a02a30acecf4369b | """
@Brief: convert data extracted from Soundpaces from Colmap format to NeRF format
@Author: Ty Nguyen
@Email: [email protected]
"""
from builtins import breakpoint
import os
import numpy as np
from utils.get_rgbds_options import get_args
from utils.colmap_read_write import read_model
from utils.colmap_read_write import get_single_cam_params
from utils.colmap_read_write import get_cvCam2W_transformations
def gen_pose(basedir, ext=".txt", order_poses_by_image_names=True):
"""
@Brief: generate NeRF poses.
This is modified from the original code and this thread: https://github.com/Fyusion/LLFF/issues/10
"""
colmap_data_root = os.path.join(basedir, "map")
colmap_cameras, colmap_images = read_model(colmap_data_root, ext)
# Get camera intrinsics parameters
# TODO: handle multiple camera cases.
# For now, assume there is only a single camera
hwf_params = get_single_cam_params(colmap_cameras)
# Get OpenCV cam to world transformations
    cvCam2W_mats, near_far_distances, sorted_image_names = get_cvCam2W_transformations(
colmap_images,
order_poses_by_image_names=order_poses_by_image_names,
get_image_names=True,
) # (Num_poses, 4 x 4)
# Get poses in NeRF format (3x5 x num_images)
poses = cvCam2W_mats[:, :3, :4].transpose([1, 2, 0]) # 3 x 4 x num_images
# Concatenate poses with hwf (camera parameters)
poses = np.concatenate(
[poses, np.tile(hwf_params[..., np.newaxis], [1, 1, poses.shape[-1]])], 1
)
# Poses now is 3x5 x num_images where the first 4 columns represent the cvCam2W matrix, the
# last column represents h,w,f
# Swap columns to represent the transformation from
# must switch to [-u, r, -t] from [r, -u, t], NOT [r, u, -t]
# The following column swapping will swap from order 0,1,2,3,4 to 1,0,(-2),3,4.
# This is equivalent to right multiplication of the current rotations with
# [[0,1,0],[1,0,0],[0,0,-1]] a rotation matrix from the "weird" coord (down, right, backward) that I'm refering as NeRF coord
# to the OpenCV coord (right, down, toward). Not sure why excluding the translation values in this swapping
poses = np.concatenate(
[
poses[:, 1:2, :],
poses[:, 0:1, :],
-poses[:, 2:3, :],
poses[:, 3:4, :],
poses[:, 4:5, :],
],
1,
)
# Flatten this matrix
poses = poses.transpose([2, 0, 1]) # num_images x 3 x 5
poses = poses.reshape([-1, 15]) # num_images x 15
# Combine the two to get num_images x 15 array
nerf_poses = np.column_stack([poses, near_far_distances])
# Save
np.save(os.path.join(basedir, "poses_bounds.npy"), nerf_poses)
print(f"[Info] Saved nerf poses to {basedir}/poses_bounds.npy")
with open(
os.path.join(basedir, "image_names_corresponding_to_poses_bounds.txt"), "w"
) as fin:
        for image_name in sorted_image_names:
fin.write(image_name + "\n")
print(
f"[Info] Saved image names for poses to {basedir}/image_names_corresponding_to_poses_bounds.txt"
)
if __name__ == "__main__":
args = get_args("replica")
scene_obs_dir = os.path.join(args.data_saving_root, args.scene)
gen_pose(scene_obs_dir, ".txt", order_poses_by_image_names=True)
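# Descriptive note (added): assuming get_cvCam2W_transformations() returns near/far
# bounds of shape (num_images, 2), the saved poses_bounds.npy is a (num_images, 17)
# array: the flattened 3x5 [pose | hwf] block (15 values) followed by the near and
# far distances, matching the LLFF-style input that NeRF pipelines expect.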
|
py | 7dfb8fa9243629a5144fb39a8085c1d325528a09 | from .api_client import Bit2c_client
from .Symbol import Symbol
import requests
# This is important: the Bit2C API only works over IPv4, so force `requests`
# (via urllib3) to skip IPv6.
requests.packages.urllib3.util.connection.HAS_IPV6 = False
|
py | 7dfb906eaf15e208b07cc601807cd12aeaff443d | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Jaime Irurzun <[email protected]>
# Pablo Orduña <[email protected]>
#
import numbers
from sqlalchemy.orm.exc import NoResultFound
import voodoo.log as log
from voodoo.log import logged
from voodoo.typechecker import typecheck
from weblab.db import db
import weblab.db.model as model
from weblab.data import ValidDatabaseSessionId
from weblab.data.command import Command
import weblab.data.dto.experiments as ExperimentAllowed
from weblab.data.experiments import ExperimentUsage, CommandSent, FileSent
import weblab.core.exc as DbErrors
import weblab.permissions as permissions
DEFAULT_VALUE = object()
class DatabaseGateway(object):
forbidden_access = 'forbidden_access'
def __init__(self, cfg_manager):
super(DatabaseGateway, self).__init__()
self.cfg_manager = cfg_manager
self.Session, self.engine = db.initialize(cfg_manager)
@typecheck(basestring)
@logged()
def get_user_by_name(self, user_login):
session = self.Session()
try:
return self._get_user(session, user_login).to_dto()
finally:
session.close()
@logged()
def list_clients(self):
"""Lists the ExperimentClients """
session = self.Session()
try:
clients = {}
for experiment in session.query(model.DbExperiment).all():
exp = experiment.to_business()
clients[exp.name, exp.category.name] = exp.client
return clients
finally:
session.close()
@logged()
def get_client_id(self, experiment_name, category_name):
"""Lists the ExperimentClients """
session = self.Session()
try:
category = session.query(model.DbExperimentCategory).filter_by(name = category_name).first()
if category is None:
return None
experiment = session.query(model.DbExperiment).filter_by(name = experiment_name, category = category).first()
if experiment is None:
return None
return experiment.client
finally:
session.close()
# @typecheck(basestring, (basestring, None), (basestring, None))
@logged()
def list_experiments(self, user_login, exp_name = None, cat_name = None):
session = self.Session()
try:
user = self._get_user(session, user_login)
user_permissions = self._gather_permissions(session, user, 'experiment_allowed')
grouped_experiments = {}
for permission in user_permissions:
p_permanent_id = self._get_parameter_from_permission(session, permission, 'experiment_permanent_id')
p_category_id = self._get_parameter_from_permission(session, permission, 'experiment_category_id')
p_time_allowed = self._get_float_parameter_from_permission(session, permission, 'time_allowed')
p_priority = self._get_int_parameter_from_permission(session, permission, 'priority', ExperimentAllowed.DEFAULT_PRIORITY)
p_initialization_in_accounting = self._get_bool_parameter_from_permission(session, permission, 'initialization_in_accounting', ExperimentAllowed.DEFAULT_INITIALIZATION_IN_ACCOUNTING)
# If a filter is passed, ignore those permissions on other experiments
if cat_name is not None and exp_name is not None:
if p_category_id != cat_name or p_permanent_id != exp_name:
continue
experiments = [ exp for exp in session.query(model.DbExperiment).filter_by(name=p_permanent_id).all() if exp.category.name == p_category_id ]
if len(experiments) == 0:
continue
experiment = experiments[0]
if isinstance(permission, model.DbUserPermission):
permission_scope = 'user'
elif isinstance(permission, model.DbGroupPermission):
permission_scope = 'group'
elif isinstance(permission, model.DbRolePermission):
permission_scope = 'role'
else:
permission_scope = 'unknown'
experiment_allowed = ExperimentAllowed.ExperimentAllowed(experiment.to_business(), p_time_allowed, p_priority, p_initialization_in_accounting, permission.permanent_id, permission.id, permission_scope)
experiment_unique_id = p_permanent_id+"@"+p_category_id
if experiment_unique_id in grouped_experiments:
grouped_experiments[experiment_unique_id].append(experiment_allowed)
else:
grouped_experiments[experiment_unique_id] = [experiment_allowed]
# If any experiment is duplicated, only the less restrictive one is given
experiments = []
for experiment_unique_id in grouped_experiments:
less_restrictive_experiment_allowed = grouped_experiments[experiment_unique_id][0]
for experiment_allowed in grouped_experiments[experiment_unique_id]:
if experiment_allowed.time_allowed > less_restrictive_experiment_allowed.time_allowed:
less_restrictive_experiment_allowed = experiment_allowed
experiments.append(less_restrictive_experiment_allowed)
experiments.sort(lambda x,y: cmp(x.experiment.category.name, y.experiment.category.name))
return tuple(experiments)
finally:
session.close()
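    # Worked illustration of the de-duplication above (values are made up): if the same
    # user is granted "experiment_allowed" on the same experiment twice, once with
    # time_allowed=200 and once with time_allowed=300, only the less restrictive
    # permission (time_allowed=300) is returned for that experiment.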
@typecheck(basestring)
@logged()
def is_access_forward(self, user_login):
session = self.Session()
try:
user = self._get_user(session, user_login)
user_permissions = self._gather_permissions(session, user, 'access_forward')
return len(user_permissions) > 0
finally:
session.close()
@typecheck(basestring)
@logged()
def is_admin(self, user_login):
session = self.Session()
try:
user = self._get_user(session, user_login)
user_permissions = self._gather_permissions(session, user, 'admin_panel_access')
return len(user_permissions) > 0
finally:
session.close()
@typecheck(basestring)
@logged()
def is_instructor(self, user_login):
session = self.Session()
try:
user = self._get_user(session, user_login)
admin_permissions = self._gather_permissions(session, user, 'admin_panel_access')
instructor_permissions = self._gather_permissions(session, user, 'instructor_of_group')
return user.role.name == 'instructor' or len(admin_permissions) > 0 or len(instructor_permissions) > 0
finally:
session.close()
@typecheck(basestring, ExperimentUsage)
@logged()
def store_experiment_usage(self, user_login, experiment_usage):
session = self.Session()
try:
use = model.DbUserUsedExperiment(
self._get_user(session, user_login),
self._get_experiment(session, experiment_usage.experiment_id.exp_name, experiment_usage.experiment_id.cat_name),
experiment_usage.start_date,
experiment_usage.from_ip,
experiment_usage.coord_address.address,
experiment_usage.reservation_id,
experiment_usage.end_date,
)
session.add(use)
# TODO: The c.response of an standard command is an object with
# a commandstring, whereas the response to an async command is
# a simple string to identify the request. The way in which the logger
# currently handles these cases is somewhat shady.
for c in experiment_usage.commands:
# If we have a response, the c.response will be an object and not
# a string. Generally, we will, unless the command was asynchronous
# and it didn't finish executing.
if type(c.response) != type(""):
session.add(model.DbUserCommand(
use,
c.command.commandstring,
c.timestamp_before,
c.response.commandstring,
c.timestamp_after
))
else:
# In this other case, the response is a string, which means
# that we have not updated it with the real response. Probably,
# it was an asynchronous command which did not finish executing
# by the time the experiment ended.
session.add(model.DbUserCommand(
use,
c.command.commandstring,
c.timestamp_before,
"[RESPONSE NOT AVAILABLE]",
c.timestamp_after
))
for f in experiment_usage.sent_files:
if f.is_loaded():
saved = f.save(self.cfg_manager, experiment_usage.reservation_id)
else:
saved = f
session.add(model.DbUserFile(
use,
saved.file_path,
saved.file_hash,
saved.timestamp_before,
saved.file_info,
saved.response.commandstring,
saved.timestamp_after
))
permission_scope = experiment_usage.request_info.pop('permission_scope')
permission_id = experiment_usage.request_info.pop('permission_id')
if permission_scope == 'group':
use.group_permission_id = permission_id
elif permission_scope == 'user':
use.user_permission_id = permission_id
elif permission_scope == 'role':
use.role_permission_id = permission_id
for reservation_info_key in experiment_usage.request_info:
db_key = session.query(model.DbUserUsedExperimentProperty).filter_by(name = reservation_info_key).first()
if db_key is None:
db_key = model.DbUserUsedExperimentProperty(reservation_info_key)
session.add(db_key)
value = experiment_usage.request_info[reservation_info_key]
session.add(model.DbUserUsedExperimentPropertyValue( unicode(value), db_key, use ))
session.commit()
finally:
session.close()
@typecheck(basestring, float, CommandSent)
@logged()
def finish_experiment_usage(self, reservation_id, end_date, last_command ):
session = self.Session()
try:
user_used_experiment = session.query(model.DbUserUsedExperiment).filter_by(reservation_id = reservation_id).first()
if user_used_experiment is None:
return False
user_used_experiment.set_end_date(end_date)
session.add(user_used_experiment)
session.add(model.DbUserCommand(
user_used_experiment,
last_command.command.commandstring,
last_command.timestamp_before,
last_command.response.commandstring,
last_command.timestamp_after
))
session.commit()
return True
finally:
session.close()
@logged()
def store_commands(self, complete_commands, command_requests, command_responses, complete_files, file_requests, file_responses):
""" Stores all the commands in a single transaction; retrieving the ids of the file and command requests """
request_mappings = {
# entry_id : command_id
}
session = self.Session()
try:
db_commands_and_files = []
for reservation_id, entry_id, command in complete_commands:
db_command = self._append_command(session, reservation_id, command)
if db_command == False:
request_mappings[entry_id] = False
for reservation_id, entry_id, command in complete_files:
db_file = self._append_file(session, reservation_id, command)
if db_file == False:
request_mappings[entry_id] = False
for entry_id in command_requests:
reservation_id, command = command_requests[entry_id]
db_command = self._append_command(session, reservation_id, command)
if db_command == False:
request_mappings[entry_id] = False
else:
db_commands_and_files.append((entry_id, db_command))
for entry_id in file_requests:
reservation_id, file = file_requests[entry_id]
db_file = self._append_file(session, reservation_id, file)
if db_file == False:
request_mappings[entry_id] = False
else:
db_commands_and_files.append((entry_id, db_file))
for entry_id, command_id, response, timestamp in command_responses:
if not self._update_command(session, command_id, response, timestamp):
request_mappings[entry_id] = False
for entry_id, file_id, response, timestamp in file_responses:
if not self._update_file(session, file_id, response, timestamp):
request_mappings[entry_id] = False
session.commit()
for entry_id, db_command in db_commands_and_files:
request_mappings[entry_id] = db_command.id
finally:
session.close()
return request_mappings
@typecheck(basestring, CommandSent)
@logged()
def append_command(self, reservation_id, command ):
session = self.Session()
try:
db_command = self._append_command(session, reservation_id, command)
session.commit()
return db_command.id
finally:
session.close()
def _append_command(self, session, reservation_id, command):
user_used_experiment = session.query(model.DbUserUsedExperiment).filter_by(reservation_id = reservation_id).first()
if user_used_experiment is None:
return False
db_command = model.DbUserCommand(
user_used_experiment,
command.command.commandstring,
command.timestamp_before,
command.response.commandstring if command.response is not None else None,
command.timestamp_after
)
session.add(db_command)
return db_command
@typecheck(numbers.Integral, Command, float)
@logged()
def update_command(self, command_id, response, end_timestamp ):
session = self.Session()
try:
if self._update_command(session, command_id, response, end_timestamp):
session.commit()
return True
return False
finally:
session.close()
def _update_command(self, session, command_id, response, end_timestamp):
db_command = session.query(model.DbUserCommand).filter_by(id = command_id).first()
if db_command is None:
return False
db_command.response = response.commandstring if response is not None else None
db_command.set_timestamp_after(end_timestamp)
session.add(db_command)
return True
@typecheck(basestring, FileSent)
@logged()
def append_file(self, reservation_id, file_sent):
session = self.Session()
try:
db_file_sent = self._append_file(session, reservation_id, file_sent)
session.commit()
return db_file_sent.id
finally:
session.close()
def _append_file(self, session, reservation_id, file_sent):
user_used_experiment = session.query(model.DbUserUsedExperiment).filter_by(reservation_id = reservation_id).first()
if user_used_experiment is None:
return False
db_file_sent = model.DbUserFile(
user_used_experiment,
file_sent.file_path,
file_sent.file_hash,
file_sent.timestamp_before,
file_sent.file_info,
file_sent.response.commandstring if file_sent.response is not None else None,
file_sent.timestamp_after
)
session.add(db_file_sent)
return db_file_sent
@typecheck(numbers.Integral, Command, float)
@logged()
def update_file(self, file_id, response, end_timestamp ):
session = self.Session()
try:
if self._update_file(session, file_id, response, end_timestamp):
session.commit()
return True
return False
finally:
session.close()
def _update_file(self, session, file_id, response, end_timestamp):
db_file_sent = session.query(model.DbUserFile).filter_by(id = file_id).first()
if db_file_sent is None:
return False
db_file_sent.response = response.commandstring if response is not None else None
db_file_sent.set_timestamp_after(end_timestamp)
session.add(db_file_sent)
return True
@logged()
def list_usages_per_user(self, user_login, first=0, limit=20):
session = self.Session()
try:
user = self._get_user(session, user_login)
uses = session.query(model.DbUserUsedExperiment).filter_by(user=user).offset(first).limit(limit).all()
return [ use.to_business_light() for use in uses ]
finally:
session.close()
@logged()
def retrieve_usage(self, usage_id):
session = self.Session()
try:
use = session.query(model.DbUserUsedExperiment).filter_by(id=usage_id).one()
return use.to_business()
finally:
session.close()
@logged()
def get_experiment_uses_by_id(self, user_login, reservation_ids):
""" Retrieve the full information of these reservation_ids, if the user has permissions to do so. By default
a user can only access to those reservations that he made in the past."""
results = []
session = self.Session()
try:
user = session.query(model.DbUser).filter_by(login = user_login).first()
if user is None:
return [self.forbidden_access] * len(reservation_ids)
for reservation_id in reservation_ids:
experiment_use = session.query(model.DbUserUsedExperiment).filter_by(reservation_id = reservation_id.id).first()
if experiment_use is None:
results.append(None)
else:
if experiment_use.user == user:
results.append(experiment_use.to_business())
else:
results.append(self.forbidden_access)
finally:
session.close()
return results
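# Added commentary (not part of the original module): callers get one entry per
# requested reservation id -- the business object when the use belongs to the
# requesting user, self.forbidden_access when it belongs to someone else, and
# None when the reservation id is unknown, e.g.:
#   uses = gateway.get_experiment_uses_by_id('student1', reservation_ids)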
@logged()
def get_user_permissions(self, user_login):
session = self.Session()
try:
user = self._get_user(session, user_login)
user_permissions = []
for pt in permissions.permission_types:
user_permissions.extend(self._gather_permissions(session, user, pt))
dto_permissions = [ permission.to_dto() for permission in user_permissions ]
return tuple(dto_permissions)
finally:
session.close()
def _get_user(self, session, user_login):
try:
return session.query(model.DbUser).filter_by(login=user_login).one()
except NoResultFound:
raise DbErrors.DbProvidedUserNotFoundError("Unable to find a User with the provided login: '%s'" % user_login)
def _get_experiment(self, session, exp_name, cat_name):
try:
return session.query(model.DbExperiment) \
.filter(model.DbExperimentCategory.name == cat_name) \
.filter_by(name=exp_name).one()
except NoResultFound:
raise DbErrors.DbProvidedExperimentNotFoundError("Unable to find an Experiment with the provided unique id: '%s@%s'" % (exp_name, cat_name))
def _gather_groups_permissions(self, session, group, permission_type_name, permissions, remaining_list):
if group.id in remaining_list:
return
remaining_list.append(group.id)
self._add_or_replace_permissions(permissions, self._get_permissions(session, group, permission_type_name))
if group.parent is not None:
self._gather_groups_permissions(session, group.parent, permission_type_name, permissions, remaining_list)
def _gather_permissions(self, session, user, permission_type_name):
permissions = []
self._add_or_replace_permissions(permissions, self._get_permissions(session, user.role, permission_type_name))
remaining_list = []
for group in user.groups:
self._gather_groups_permissions(session, group, permission_type_name, permissions, remaining_list)
self._add_or_replace_permissions(permissions, self._get_permissions(session, user, permission_type_name))
return permissions
def _add_or_replace_permissions(self, permissions, permissions_to_add):
permissions.extend(permissions_to_add)
def _get_permissions(self, session, user_or_role_or_group_or_ee, permission_type_name):
return [ pi for pi in user_or_role_or_group_or_ee.permissions if pi.get_permission_type() == permission_type_name ]
def _get_parameter_from_permission(self, session, permission, parameter_name, default_value = DEFAULT_VALUE):
try:
param = [ p for p in permission.parameters if p.get_name() == parameter_name ][0]
except IndexError:
if default_value == DEFAULT_VALUE:
raise DbErrors.DbIllegalStatusError(
permission.get_permission_type() + " permission without " + parameter_name
)
else:
return default_value
return param.value
def _get_float_parameter_from_permission(self, session, permission, parameter_name, default_value = DEFAULT_VALUE):
value = self._get_parameter_from_permission(session, permission, parameter_name, default_value)
try:
return float(value)
except ValueError:
raise DbErrors.InvalidPermissionParameterFormatError(
"Expected float as parameter '%s' of '%s', found: '%s'" % (
parameter_name,
permission.get_permission_type(),
value
)
)
def _get_int_parameter_from_permission(self, session, permission, parameter_name, default_value = DEFAULT_VALUE):
value = self._get_parameter_from_permission(session, permission, parameter_name, default_value)
try:
return int(value)
except ValueError:
raise DbErrors.InvalidPermissionParameterFormatError(
"Expected int as parameter '%s' of '%s', found: '%s'" % (
parameter_name,
permission.get_permission_type(),
value
)
)
def _get_bool_parameter_from_permission(self, session, permission, parameter_name, default_value = DEFAULT_VALUE):
return self._get_parameter_from_permission(session, permission, parameter_name, default_value)
def _delete_all_uses(self):
""" IMPORTANT: SHOULD NEVER BE USED IN PRODUCTION, IT'S HERE ONLY FOR TESTS """
session = self.Session()
try:
uu = session.query(model.DbUserUsedExperiment).all()
for i in uu:
session.delete(i)
session.commit()
finally:
session.close()
def _insert_user_used_experiment(self, user_login, experiment_name, experiment_category_name, start_time, origin, coord_address, reservation_id, end_date, commands = None, files = None):
""" IMPORTANT: SHOULD NEVER BE USED IN PRODUCTION, IT'S HERE ONLY FOR TESTS """
if commands is None:
commands = []
if files is None:
files = []
session = self.Session()
try:
user = session.query(model.DbUser).filter_by(login=user_login).one()
category = session.query(model.DbExperimentCategory).filter_by(name=experiment_category_name).one()
experiment = session.query(model.DbExperiment). \
filter_by(name=experiment_name). \
filter_by(category=category).one()
experiment_id = experiment.id
exp_use = model.DbUserUsedExperiment(user, experiment, start_time, origin, coord_address, reservation_id, end_date)
session.add(exp_use)
session.commit()
return experiment_id
finally:
session.close()
@logged()
def retrieve_role_and_user_auths(self, username):
""" Retrieve the role and user auths for a given username."""
session = self.Session()
try:
try:
user = session.query(model.DbUser).filter_by(login=username).one()
except NoResultFound:
raise DbErrors.DbUserNotFoundError("User '%s' not found in database" % username)
all_user_auths = session.query(model.DbUserAuth).filter_by(user=user).all()
#
sorted_user_auths = sorted(all_user_auths, lambda x, y: cmp(x.auth.priority, y.auth.priority))
if len(sorted_user_auths) > 0:
return user.login, user.role.name, [ user_auth.to_business() for user_auth in sorted_user_auths ]
else:
raise DbErrors.DbNoUserAuthNorPasswordFoundError(
"No UserAuth found"
)
finally:
session.close()
@logged()
def check_external_credentials(self, external_id, system):
""" Given an External ID, such as the ID in Facebook or Moodle or whatever, and selecting
the system, return the first username that matches that user_id. The method
expects that the system uses an identifier that starts with the id"""
session = self.Session()
try:
try:
auth_type = session.query(model.DbAuthType).filter_by(name=system).one()
if len(auth_type.auths) == 0:
raise DbErrors.DbUserNotFoundError("No instance of system '%s' found in database." % system)
except NoResultFound:
raise DbErrors.DbUserNotFoundError("System '%s' not found in database" % system)
try:
user_auth = session.query(model.DbUserAuth).filter(model.DbUserAuth.auth_id.in_([auth.id for auth in auth_type.auths]), model.DbUserAuth.configuration==external_id).one()
except NoResultFound:
raise DbErrors.DbUserNotFoundError("User '%s' not found in database" % external_id)
user = user_auth.user
return ValidDatabaseSessionId( user.login, user.role.name)
finally:
session.close()
###########################################################################
################## grant_external_credentials #########################
###########################################################################
@logged()
def grant_external_credentials(self, username, external_id, system):
""" Given a system and an external_id, grant access with those credentials for user user_id. Before calling
this method, the system has checked that this user is the owner of external_id and of user_id"""
session = self.Session()
try:
try:
auth_type = session.query(model.DbAuthType).filter_by(name=system).one()
auth = auth_type.auths[0]
except (NoResultFound, IndexError):
raise DbErrors.DbUserNotFoundError("System '%s' not found in database" % system)
try:
user = session.query(model.DbUser).filter_by(login=username).one()
except NoResultFound:
raise DbErrors.DbUserNotFoundError("User '%s' not found in database" % user)
for user_auth in user.auths:
if user_auth.auth == auth:
raise DbErrors.DbUserNotFoundError("User '%s' already has credentials in system %s" % (username, system))
user_auth = model.DbUserAuth(user = user, auth = auth, configuration=str(external_id))
session.add(user_auth)
session.commit()
finally:
session.close()
#####################################################################
################## create_external_user #########################
#####################################################################
@logged()
def create_external_user(self, external_user, external_id, system, group_names):
session = self.Session()
try:
try:
auth_type = session.query(model.DbAuthType).filter_by(name=system).one()
auth = auth_type.auths[0]
except (NoResultFound, KeyError):
raise DbErrors.DbUserNotFoundError("System '%s' not found in database" % system)
groups = []
for group_name in group_names:
try:
group = session.query(model.DbGroup).filter_by(name=group_name).one()
except NoResultFound:
raise DbErrors.DbUserNotFoundError("Group '%s' not found in database" % group_name)
groups.append(group)
try:
role = session.query(model.DbRole).filter_by(name=external_user.role.name).one()
user = model.DbUser(external_user.login, external_user.full_name, external_user.email, role = role)
user_auth = model.DbUserAuth(user, auth, configuration = external_id)
for group in groups:
group.users.append(user)
session.add(user)
session.add(user_auth)
session.commit()
except Exception as e:
log.log( DatabaseGateway, log.level.Warning, "Couldn't create user: %s" % e)
log.log_exc(DatabaseGateway, log.level.Info)
raise DbErrors.DatabaseError("Couldn't create user! Contact administrator")
finally:
session.close()
def create_gateway(cfg_manager):
return DatabaseGateway(cfg_manager)
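# Hedged usage sketch (added commentary, not part of the original module): how
# the factory above and the external-credential lookup are typically combined.
# The cfg_manager value and the 'facebook' system name are assumptions for the
# example; the call returns a ValidDatabaseSessionId carrying login and role.
def _example_external_login(cfg_manager, external_id):
    gateway = create_gateway(cfg_manager)
    return gateway.check_external_credentials(external_id, 'facebook')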
|
py | 7dfb9150f30c51e6596aa3f642618b27e7aa1079 | from collections import deque
from dealer.dealer import Dealer
from dealer.deck import Deck
from evolution.player.player import Player
from evo_json.convert_py_json.convert_player import convert_from_pj_lop_plus, \
convert_to_pj_lop_plus
from evo_json.convert_py_json.convert_trait import convert_from_pj_loc, \
convert_to_pj_loc
def process_configuration(pyjson_config):
"""
Processes a PyJSON Configuration by calling feed1 once
:param pyjson_config: the Configuration to be updated
:type pyjson_config: PyJSON
:return: the updated Configuration
:rtype: PyJSON
"""
dealer = convert_config_to_dealer(pyjson_config)
dealer.feed1(deque(range(len(dealer.player_states))))
return convert_dealer_to_config(dealer)
def convert_config_to_dealer(pyjson_config):
"""
Converts a PyJSON configuration to a Dealer
:param pyjson_config: the Configuration
:type pyjson_config: PyJSON
:return: the dealer from the given configuration
:rtype: Dealer
"""
player_states = convert_from_pj_lop_plus(pyjson_config[0])
deck = Deck(convert_from_pj_loc(pyjson_config[2]))
wateringhole = pyjson_config[1]
return Dealer.make_dealer(player_states, deck, wateringhole)
def convert_dealer_to_config(dealer):
"""
Converts a Dealer to a PyJSON Configuration
:param dealer: the dealer to be converted
:type dealer: Dealer
:return: The Configuration of the Dealer
:rtype: PyJSON
"""
lop = convert_to_pj_lop_plus(dealer.player_states)
wateringhole = dealer.wateringhole
loc = convert_to_pj_loc(dealer.deck.loc)
return [lop, wateringhole, loc]
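# Hedged usage sketch (added commentary, not part of the original module): how
# process_configuration might be driven from JSON text; the json import and the
# round trip through strings are additions for the example.
def _example_run(json_text):
    import json
    return json.dumps(process_configuration(json.loads(json_text)))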
|
py | 7dfb921e6e890550f6ef217b6132a1a67fc6ee94 | ###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2008, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class MathTestCase(PluginTestCase):
plugins = ('Math',)
def testBase(self):
self.assertNotRegexp('base 56 asdflkj', 'ValueError')
self.assertResponse('base 16 2 F', '1111')
self.assertResponse('base 2 16 1111', 'F')
self.assertResponse('base 20 BBBB', '92631')
self.assertResponse('base 10 20 92631', 'BBBB')
self.assertResponse('base 2 36 10', '2')
self.assertResponse('base 36 2 10', '100100')
self.assertResponse('base 2 1010101', '85')
self.assertResponse('base 2 2 11', '11')
self.assertResponse('base 12 0', '0')
self.assertResponse('base 36 2 0', '0')
self.assertNotError("base 36 " +\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ")
self.assertResponse("base 10 36 [base 36 " +\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"\
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz]",
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"\
"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ")
self.assertResponse('base 2 10 [base 10 2 12]', '12')
self.assertResponse('base 16 2 [base 2 16 110101]', '110101')
self.assertResponse('base 10 8 [base 8 76532]', '76532')
self.assertResponse('base 10 36 [base 36 csalnwea]', 'CSALNWEA')
self.assertResponse('base 5 4 [base 4 5 212231]', '212231')
self.assertError('base 37 1')
self.assertError('base 1 1')
self.assertError('base 12 1 1')
self.assertError('base 1 12 1')
self.assertError('base 1.0 12 1')
self.assertError('base A 1')
self.assertError('base 4 4')
self.assertError('base 10 12 A')
print
print "If we have not fixed a bug with Math.base, the following ",
print "tests will hang the test-suite."
self.assertRegexp('base 2 10 [base 10 2 -12]', '-12')
self.assertRegexp('base 16 2 [base 2 16 -110101]', '-110101')
def testCalc(self):
self.assertResponse('calc 5*0.06', str(5*0.06))
self.assertResponse('calc 2.0-7.0', str(2-7))
self.assertResponse('calc (-1)**.5', 'i')
self.assertResponse('calc e**(i*pi)+1', '0')
self.assertResponse('calc (-5)**.5', '2.2360679775i')
self.assertResponse('calc -((-5)**.5)', '-2.2360679775i')
self.assertNotRegexp('calc [9, 5] + [9, 10]', 'TypeError')
self.assertError('calc [9, 5] + [9, 10]')
self.assertNotError('calc degrees(2)')
self.assertNotError('calc (2 * 3) - 2*(3*4)')
self.assertNotError('calc (3) - 2*(3*4)')
self.assertNotError('calc (1600 * 1200) - 2*(1024*1280)')
self.assertNotError('calc 3-2*4')
self.assertNotError('calc (1600 * 1200)-2*(1024*1280)')
self.assertError('calc factorial(99)')
def testCalcNoNameError(self):
self.assertNotRegexp('calc foobar(x)', 'NameError')
def testCalcImaginary(self):
self.assertResponse('calc 3 + sqrt(-1)', '3+i')
def testCalcFloorWorksWithSqrt(self):
self.assertNotError('calc floor(sqrt(5))')
def testCaseInsensitive(self):
self.assertNotError('calc PI**PI')
def testCalcMaxMin(self):
self.assertResponse('calc max(1,2)', '2')
self.assertResponse('calc min(1,2)', '1')
def testCalcStrFloat(self):
self.assertResponse('calc 3+33333333333333', '33333333333336')
def testICalc(self):
self.assertResponse('icalc 1^1', '0')
self.assertResponse('icalc 10**24', '1' + '0'*24)
self.assertRegexp('icalc 49/6', '8.16')
self.assertNotError('icalc factorial(99)')
def testRpn(self):
self.assertResponse('rpn 5 2 +', '7')
self.assertResponse('rpn 1 2 3 +', 'Stack: [1, 5]')
self.assertResponse('rpn 1 dup', 'Stack: [1, 1]')
self.assertResponse('rpn 2 3 4 + -', str(2-7))
self.assertNotError('rpn 2 degrees')
def testRpnSwap(self):
self.assertResponse('rpn 1 2 swap', 'Stack: [2, 1]')
def testRpmNoSyntaxError(self):
self.assertNotRegexp('rpn 2 3 foobar', 'SyntaxError')
def testConvert(self):
self.assertResponse('convert 1 m to cm', '100')
self.assertResponse('convert m to cm', '100')
self.assertResponse('convert 3 metres to km', '0.003')
self.assertResponse('convert 32 F to C', '0')
self.assertResponse('convert 32 C to F', '89.6')
self.assertResponse('convert [calc 2*pi] rad to degree', '360')
self.assertResponse('convert amu to atomic mass unit',
'1')
self.assertResponse('convert [calc 2*pi] rad to circle', '1')
self.assertError('convert 1 meatball to bananas')
self.assertError('convert 1 gram to meatballs')
self.assertError('convert 1 mol to grams')
self.assertError('convert 1 m to kpa')
def testConvertSingularPlural(self):
self.assertResponse('convert [calc 2*pi] rads to degrees', '360')
self.assertResponse('convert 1 carat to grams', '0.2')
self.assertResponse('convert 10 lbs to oz', '160')
self.assertResponse('convert mA to amps', '0.001')
def testConvertCaseSensitivity(self):
self.assertError('convert MA to amps')
self.assertError('convert M to amps')
self.assertError('convert Radians to rev')
def testUnits(self):
self.assertNotError('units')
self.assertNotError('units mass')
self.assertNotError('units flux density')
def testAbs(self):
self.assertResponse('calc abs(2)', '2')
self.assertResponse('calc abs(-2)', '2')
self.assertResponse('calc abs(2.0)', '2')
self.assertResponse('calc abs(-2.0)', '2')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
py | 7dfb925061887440e19eccd3b30f2df9a8c6bca4 | from .udkanbun2spacy import UDKanbunLanguage,UDKanbunTokenizer,load,to_conllu
|
py | 7dfb929b57d70127e1d06497d3083325debafdb7 | import json
def get_radical_records(fp):
for l in fp.readlines():
if not l.startswith('#'):
symbols = l.split()
yield symbols[0], symbols[2:]
def generate_shared_radicals(kanji, radicals_table, significant=4):
def get_radical_scores():
for l in kanji:
for r in kanji:
if l != r:
if l in radicals_table and r in radicals_table:
common = len(radicals_table[l] & radicals_table[r])
if common >= significant:
yield "{}:{}".format(l, r), common
shared_radical_counts = {kanji: rad_match for kanji, rad_match in get_radical_scores()}
return shared_radical_counts
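# Self-contained illustration (added commentary, not part of the original
# script, Python 3 assumed): two fake kradfile-style lines; symbols[1] is the
# ':' separator that get_radical_records skips over.
def _shared_radical_demo():
    from io import StringIO
    fake_kradfile = StringIO("# comment\nA : r1 r2 r3 r4\nB : r1 r2 r3 r4 r5\n")
    radicals = {kanji: set(rads) for kanji, rads in get_radical_records(fake_kradfile)}
    # A and B share four radicals, so with the default significant=4 the result
    # contains both "A:B" and "B:A" mapped to 4.
    return generate_shared_radicals(radicals.keys(), radicals)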
if __name__ == '__main__':
with open('../data/kradfile-u.txt') as f:
radicals = {kanji: set(radicals) for kanji, radicals in get_radical_records(f)}
shared_counts = generate_shared_radicals(radicals.keys(), radicals)
with open('../data/radical_scores.json', 'w+') as j:
json.dump(shared_counts, j)
|
py | 7dfb934077e81e3f85f73e150114581220e4b05e | from bbpyp.lexicomb.parser.binary_operator import BinaryOperator
from bbpyp.lexicomb.parser.model.operator_enum import OperatorEnum
class ArithmeticBinaryOperator(BinaryOperator):
BLANK_SPACE_SENTINEL = None
def __init__(self, operator, real_number_factory, string_factory, *args, **kwargs):
super().__init__(operator, *args, **kwargs)
self._real_number_factory = real_number_factory
self._string_factory = string_factory
def __repr__(self):
return f"({self.operator} {self.lhs} {self.rhs})"
def _eval(self, frame):
lhs_value = self.lhs.eval(frame)
rhs_value = self.rhs.eval(frame)
value = None
if self.operator.value == OperatorEnum.ARITHMETIC_ADDITION.value:
try:
lhs_value = " " if lhs_value == ArithmeticBinaryOperator.BLANK_SPACE_SENTINEL else lhs_value
rhs_value = " " if rhs_value == ArithmeticBinaryOperator.BLANK_SPACE_SENTINEL else rhs_value
value = lhs_value + rhs_value
except TypeError:
value = f"{lhs_value}{rhs_value}"
elif self.operator.value == OperatorEnum.ARITHMETIC_SUBTRACTION.value:
value = lhs_value - rhs_value
elif self.operator.value == OperatorEnum.ARITHMETIC_MULTIPLICATION.value:
value = lhs_value * rhs_value
elif self.operator.value == OperatorEnum.ARITHMETIC_DIVISION.value:
value = lhs_value / rhs_value
try:
return self._real_number_factory(value).eval(frame)
except ValueError:
return self._string_factory(value).eval(frame)
|
py | 7dfb93c98de7bfa677e78f18d60efe10e97f1efe | from pywebapp.route_webapp import RouteWebApp
from pywebapp import http_helper
AUTHENTICATION_ATTR = 'requires_authentication'
class SecureWebApp(RouteWebApp):
def is_authenticated(self, request):
# TODO: override this for stronger authentication / guarantees
return 'REMOTE_USER' in request.env
def unauthenticated_request_handler(self, request):
status = 'UNAUTHORIZED'
request.response.send(status_code=http_helper.status[status])
yield 'HTTP {code} {text}'.format(code=http_helper.status[status], text=status)
def handle_request(self, request):
route, args = self.route_lookup(request)
method = self.route_target(route)
if (hasattr(method, AUTHENTICATION_ATTR) and not getattr(method, AUTHENTICATION_ATTR))\
or self.is_authenticated(request):
for result in self.route_call(request, route, args):
yield result
else:
for result in self.unauthenticated_request_handler(request):
yield result
|
py | 7dfb9410a18384ac259fd479fda64735e6f756b4 | import itertools
pl = []
def compsite():
p = 2
global pl
pl = [p]
for i in itertools.count(p + 1):
isPrime = True
for p0 in pl:
if i % p0 == 0:
isPrime = False
break
if p0 * p0 > i:
break
if isPrime:
pl.append(i)
else:
yield i
squarelst = set({1})
msquare = 1
def incSquareLst(squarelst, ms, n):
if ms ** 2 > n:
return squarelst, ms
for i in itertools.count(ms):
v = i ** 2
squarelst.add(v)
if v > n:
break
return squarelst, i
def isSquare(n):
global msquare, squarelst
squarelst, msquare = incSquareLst(squarelst, msquare, n)
return n in squarelst
def isConj(n):
for p in pl:
if n < p:
return False
d = n - p
if d % 2 == 0 and isSquare(d / 2):
return True
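# Added commentary (not in the original script): the driver below searches for
# the smallest odd composite that cannot be written as a prime plus twice a
# square (Goldbach's "other" conjecture); isConj returns True as soon as a
# decomposition n = p + 2*k**2 with p prime is found.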
comp = compsite()
while 1:
n = comp.next()
if n % 2 == 0:
continue
if not isConj(n):
print n
break
|
py | 7dfb95c0027dfec09c4309e972bd51539e33ee3d | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class ResourceType(polyaxon_sdk.V1ResourceType):
INT = polyaxon_sdk.V1ResourceType.INT
FLOAT = polyaxon_sdk.V1ResourceType.FLOAT
INT_VALUES = {INT, INT.upper(), INT.capitalize()}
FLOAT_VALUES = {FLOAT, FLOAT.upper(), FLOAT.capitalize()}
VALUES = INT_VALUES | FLOAT_VALUES
@classmethod
def is_int(cls, value):
return value in cls.INT_VALUES
@classmethod
def is_float(cls, value):
return value in cls.FLOAT_VALUES
class V1Optimization(polyaxon_sdk.V1Optimization):
MAXIMIZE = polyaxon_sdk.V1Optimization.MAXIMIZE
MINIMIZE = polyaxon_sdk.V1Optimization.MINIMIZE
MAXIMIZE_VALUES = [MAXIMIZE, MAXIMIZE.upper(), MAXIMIZE.capitalize()]
MINIMIZE_VALUES = [MINIMIZE, MINIMIZE.upper(), MINIMIZE.capitalize()]
VALUES = MAXIMIZE_VALUES + MINIMIZE_VALUES
@classmethod
def maximize(cls, value):
return value in cls.MAXIMIZE_VALUES
@classmethod
def minimize(cls, value):
return value in cls.MINIMIZE_VALUES
class OptimizationMetricSchema(BaseCamelSchema):
name = fields.Str()
optimization = fields.Str(
allow_none=True, validate=validate.OneOf(V1Optimization.VALUES)
)
@staticmethod
def schema_config():
return V1OptimizationMetric
class V1OptimizationMetric(BaseConfig, polyaxon_sdk.V1OptimizationMetric):
SCHEMA = OptimizationMetricSchema
IDENTIFIER = "optimization_metric"
def get_for_sort(self):
if self.optimization == V1Optimization.MINIMIZE:
return self.name
return "-{}".format(self.name)
class OptimizationResourceSchema(BaseCamelSchema):
name = fields.Str()
type = fields.Str(allow_none=True, validate=validate.OneOf(ResourceType.VALUES))
@staticmethod
def schema_config():
return V1OptimizationResource
class V1OptimizationResource(BaseConfig, polyaxon_sdk.V1OptimizationResource):
SCHEMA = OptimizationResourceSchema
IDENTIFIER = "optimization_resource"
def cast_value(self, value):
if ResourceType.is_int(self.type):
return int(value)
if ResourceType.is_float(self.type):
return float(value)
return value
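# Added commentary (not part of the original module): a minimal check of the
# case-insensitive helpers above, driven from the class constants so it holds
# for whatever literal values the SDK defines.
def _optimization_helpers_demo():
    assert ResourceType.is_int(ResourceType.INT.upper())
    assert ResourceType.is_float(ResourceType.FLOAT.capitalize())
    assert V1Optimization.maximize(V1Optimization.MAXIMIZE.capitalize())
    assert V1Optimization.minimize(V1Optimization.MINIMIZE.upper())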
|
py | 7dfb9696a5f6e6e5c0d631d000d39e620ae4cd6e |
from __future__ import division
import time
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685(address=0x40)
pwm = Adafruit_PCA9685.PCA9685(address=0x41)
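# Added commentary (not in the original snippet): both controllers are bound to
# the same name, so only the board at address 0x41 remains reachable afterwards;
# distinct names (e.g. pwm_40 / pwm_41) would keep both objects usable.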
|
py | 7dfb98973ca4bdd54d703b86143ea811bce7f7b1 | import logging
import os
import time
import uuid
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.authorization import AuthorizationManagementClient
from azure.mgmt.compute.models import ResourceIdentityType
# If you wish to use User Assigned
from azure.mgmt.msi import ManagedServiceIdentityClient
# If you wish to debug
# logging.basicConfig(level=logging.DEBUG)
# Resource
LOCATION = 'westcentralus'
GROUP_NAME = 'azure-msi-sample-group2'
# Network
VNET_NAME = 'azure-sample-vnet'
SUBNET_NAME = 'azure-sample-subnet'
PUBLIC_IP_NAME = 'azure-sample-pip'
NIC_NAME = 'azure-sample-nic'
IP_CONFIG_NAME = 'azure-sample-ip-config'
# VM
VM_NAME = 'azuretestmsi'
ADMIN_LOGIN = 'Foo12'
ADMIN_PASSWORD = 'BaR@123' + GROUP_NAME
# Switch this to false if you don't want to create a User Assigned Identity
USER_ASSIGNED_IDENTITY = True
# Switch this to false if you don't want to create a System Assigned Identity
SYSTEM_ASSIGNED_IDENTITY = True
# Create a Linux VM with MSI enabled. The MSI token will have Contributor role within
# the Resource Group of the VM.
#
# Important: to execute this sample, your Service Principal credential needs the
# "Owner" role, or at least the "Microsoft.Authorization/*/write" permission.
#
# This script expects that the following environment vars are set:
#
# AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
# AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
# AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
# AZURE_SUBSCRIPTION_ID: with your Azure Subscription Id
#
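# Optional pre-flight check (added commentary, not part of the original sample):
# fail fast with a readable message if any variable listed above is missing.
REQUIRED_ENV_VARS = (
    'AZURE_TENANT_ID',
    'AZURE_CLIENT_ID',
    'AZURE_CLIENT_SECRET',
    'AZURE_SUBSCRIPTION_ID',
)
def check_environment():
    missing = [name for name in REQUIRED_ENV_VARS if not os.environ.get(name)]
    if missing:
        raise RuntimeError("Missing environment variables: " + ", ".join(missing))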
def run_example():
"""Resource Group management example."""
#
# Create the Resource Manager Client with an Application (service principal) token provider
#
subscription_id = os.environ.get(
'AZURE_SUBSCRIPTION_ID',
'11111111-1111-1111-1111-111111111111') # your Azure Subscription Id
credentials = ServicePrincipalCredentials(
client_id=os.environ['AZURE_CLIENT_ID'],
secret=os.environ['AZURE_CLIENT_SECRET'],
tenant=os.environ['AZURE_TENANT_ID']
)
resource_client = ResourceManagementClient(credentials, subscription_id)
compute_client = ComputeManagementClient(credentials, subscription_id)
network_client = NetworkManagementClient(credentials, subscription_id)
authorization_client = AuthorizationManagementClient(
credentials, subscription_id)
# Create Resource group
print('\nCreate Resource Group')
resource_group = resource_client.resource_groups.create_or_update(
GROUP_NAME,
{'location': LOCATION}
)
print_item(resource_group)
if USER_ASSIGNED_IDENTITY:
# Create a User Assigned Identity if needed
print("\nCreate User Assigned Identity")
msi_client = ManagedServiceIdentityClient(credentials, subscription_id)
user_assigned_identity = msi_client.user_assigned_identities.create_or_update(
GROUP_NAME,
"myMsiIdentity", # Any name, just a human readable ID
LOCATION
)
print_item(user_assigned_identity)
print("\nCreate Network")
# Create Network components of the VM
# This is not MSI related and is just required to create the VM
subnet = create_virtual_network(network_client)
public_ip = create_public_ip(network_client)
nic = create_network_interface(network_client, subnet, public_ip)
print_item(nic)
params_identity = {}
if USER_ASSIGNED_IDENTITY and SYSTEM_ASSIGNED_IDENTITY:
params_identity['type'] = ResourceIdentityType.system_assigned_user_assigned
params_identity['user_assigned_identities'] = {
user_assigned_identity.id: {}
}
elif USER_ASSIGNED_IDENTITY: # User Assigned only
params_identity['type'] = ResourceIdentityType.user_assigned
params_identity['user_assigned_identities'] = {
user_assigned_identity.id: {}
}
elif SYSTEM_ASSIGNED_IDENTITY: # System assigned only
params_identity['type'] = ResourceIdentityType.system_assigned
# Create a VM MSI enabled
params_create = {
'location': LOCATION,
'os_profile': get_os_profile(),
'hardware_profile': get_hardware_profile(),
'network_profile': get_network_profile(nic.id),
'storage_profile': get_storage_profile(),
# Activate MSI on that VM
'identity': params_identity
}
print("\nCreate VM")
vm_poller = compute_client.virtual_machines.create_or_update(
GROUP_NAME,
VM_NAME,
params_create,
)
vm_result = vm_poller.result()
print_item(vm_result)
# Get the PublicIP after VM creation, since assignment is dynamic
public_ip = network_client.public_ip_addresses.get(
GROUP_NAME,
PUBLIC_IP_NAME
)
# By default, the MSI accounts have no permissions
# Next part is assignment of permissions to the account
# Example is Resource Group access as Contributor, but
# you can add any permissions you need.
msi_accounts_to_assign = []
if SYSTEM_ASSIGNED_IDENTITY:
msi_accounts_to_assign.append(vm_result.identity.principal_id)
if USER_ASSIGNED_IDENTITY:
msi_accounts_to_assign.append(user_assigned_identity.principal_id)
print("\nAssign permissions to MSI identities")
# Get "Contributor" built-in role as a RoleDefinition object
role_name = 'Contributor'
roles = list(authorization_client.role_definitions.list(
resource_group.id,
filter="roleName eq '{}'".format(role_name)
))
assert len(roles) == 1
contributor_role = roles[0]
# Add RG scope to the MSI identities:
for msi_identity in msi_accounts_to_assign:
role_assignment = authorization_client.role_assignments.create(
resource_group.id,
uuid.uuid4(), # Role assignment random name
{
'role_definition_id': contributor_role.id,
'principal_id': msi_identity
}
)
print_item(role_assignment)
print("You can connect to the VM using:")
print("ssh {}@{}".format(
ADMIN_LOGIN,
public_ip.ip_address,
))
print("And password: {}\n".format(ADMIN_PASSWORD))
input("Press enter to delete this Resource Group.")
# Delete Resource group and everything in it
print('Delete Resource Group')
delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
delete_async_operation.wait()
print("\nDeleted: {}".format(GROUP_NAME))
def print_item(group):
"""Print a ResourceGroup instance."""
print("\tName: {}".format(group.name))
print("\tId: {}".format(group.id))
if hasattr(group, 'location'):
print("\tLocation: {}".format(group.location))
print_properties(getattr(group, 'properties', None))
def print_properties(props):
"""Print a ResourceGroup propertyies instance."""
if props and hasattr(props, 'provisioning_state'):
print("\tProperties:")
print("\t\tProvisioning State: {}".format(props.provisioning_state))
print("\n\n")
###### Network creation, not specific to MSI scenario ######
def create_virtual_network(network_client):
params_create = {
'location': LOCATION,
'address_space': {
'address_prefixes': ['10.0.0.0/16'],
},
'subnets': [{
'name': SUBNET_NAME,
'address_prefix': '10.0.0.0/24',
}],
}
vnet_poller = network_client.virtual_networks.create_or_update(
GROUP_NAME,
VNET_NAME,
params_create,
)
vnet_poller.wait()
return network_client.subnets.get(
GROUP_NAME,
VNET_NAME,
SUBNET_NAME,
)
def create_public_ip(network_client):
params_create = {
'location': LOCATION,
'public_ip_allocation_method': 'dynamic',
}
pip_poller = network_client.public_ip_addresses.create_or_update(
GROUP_NAME,
PUBLIC_IP_NAME,
params_create,
)
return pip_poller.result()
def create_network_interface(network_client, subnet, public_ip):
params_create = {
'location': LOCATION,
'ip_configurations': [{
'name': IP_CONFIG_NAME,
'private_ip_allocation_method': "Dynamic",
'subnet': subnet,
'public_ip_address': {
'id': public_ip.id
}
}]
}
nic_poller = network_client.network_interfaces.create_or_update(
GROUP_NAME,
NIC_NAME,
params_create,
)
return nic_poller.result()
###### VM creation, not specific to MSI scenario ######
def get_os_profile():
return {
'admin_username': ADMIN_LOGIN,
'admin_password': ADMIN_PASSWORD,
'computer_name': 'testmsi',
}
def get_hardware_profile():
return {
'vm_size': 'standard_a0'
}
def get_network_profile(network_interface_id):
return {
'network_interfaces': [{
'id': network_interface_id,
}],
}
def get_storage_profile():
return {
'image_reference': {
'publisher': 'Canonical',
'offer': 'UbuntuServer',
'sku': '16.04.0-LTS',
'version': 'latest'
}
}
if __name__ == "__main__":
run_example()
|
py | 7dfb99fae642274c00138799db547b5d4df0c53a | """
infant, child, male, female, preg, lactation for all plus macroUpper
so 6 * 5 classes + 3 for macroDistRange of 1-3, 4-18 and adults
total 33 classes = 33 collections
"""
from flask import current_app as app
from cnf.models.model_macronutrients_distrange import NutrientsDocument
# Just a shorthand
db = app.db # MongoEngine(cnf) in main.py
class UpperElementsRDI(NutrientsDocument):
meta = {
'collection': 'upperMineralsRDI'
}
# mg/d = m, ug/d = u, g/d = g
arsenic = db.StringField(default='')
boron = db.StringField(default='')
calcium = db.StringField(default='') # m
chromium = db.StringField(default='') # u
copper = db.StringField(default="") # u
fluoride = db.StringField(default='') # m
iodine = db.StringField(default='') # u
iron = db.StringField(default='') # m
magnesium = db.StringField(default='') # m
manganese = db.StringField(default='') # u
molybdenum = db.StringField(default="") # u
nickel = db.StringField(default='') #mg
phosphorus = db.StringField(default='') # m
potassium = db.StringField(default="")
selenium = db.StringField(default='') # u
silicon = db.StringField(default='')
sulfate = db.StringField(default='')
vanadium = db.StringField(default='')
zinc = db.StringField(default='') # m
sodium = db.StringField(default='') # m
chloride = db.StringField(default='') # g
class UpperVitaminsRDI(NutrientsDocument):
meta = {
'collection': 'upperVitaminsRDI'
}
# mg/d = m, ug/d = u, g/d = g
vitaminA = db.StringField(default='') # u
vitaminC = db.StringField(default='') # m
vitaminD = db.StringField(default="") # u
vitaminE = db.StringField(default='') # m
vitaminK = db.StringField(default='') # u
thiamin = db.StringField(default='') # m
riboflavin = db.StringField(default='') # m
niacin = db.StringField(default='') # m
vitaminB6 = db.StringField(default="") # m
folate = db.StringField(default='') # u
vitaminB12 = db.StringField(default='') # u
pantothenicAcid = db.StringField(default='') # m
biotin = db.StringField(default='') # m
choline = db.StringField(default='') # m
carotenoids = db.StringField(default='') #all ND |
py | 7dfb9a1815fdc449107b8ce339a9d61eb28363fd | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import Binarize
def test_Binarize_inputs():
input_map = dict(
abs=dict(argstr='--abs', ),
args=dict(argstr='%s', ),
bin_col_num=dict(argstr='--bincol', ),
bin_val=dict(argstr='--binval %d', ),
bin_val_not=dict(argstr='--binvalnot %d', ),
binary_file=dict(
argstr='--o %s',
extensions=None,
genfile=True,
),
count_file=dict(argstr='--count %s', ),
dilate=dict(argstr='--dilate %d', ),
environ=dict(
nohash=True,
usedefault=True,
),
erode=dict(argstr='--erode %d', ),
erode2d=dict(argstr='--erode2d %d', ),
frame_no=dict(argstr='--frame %s', ),
in_file=dict(
argstr='--i %s',
copyfile=False,
extensions=None,
mandatory=True,
),
invert=dict(argstr='--inv', ),
mask_file=dict(
argstr='--mask maskvol',
extensions=None,
),
mask_thresh=dict(argstr='--mask-thresh %f', ),
match=dict(argstr='--match %d...', ),
max=dict(
argstr='--max %f',
xor=['wm_ven_csf'],
),
merge_file=dict(
argstr='--merge %s',
extensions=None,
),
min=dict(
argstr='--min %f',
xor=['wm_ven_csf'],
),
out_type=dict(argstr='', ),
rmax=dict(argstr='--rmax %f', ),
rmin=dict(argstr='--rmin %f', ),
subjects_dir=dict(),
ventricles=dict(argstr='--ventricles', ),
wm=dict(argstr='--wm', ),
wm_ven_csf=dict(
argstr='--wm+vcsf',
xor=['min', 'max'],
),
zero_edges=dict(argstr='--zero-edges', ),
zero_slice_edge=dict(argstr='--zero-slice-edges', ),
)
inputs = Binarize.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Binarize_outputs():
output_map = dict(
binary_file=dict(extensions=None, ),
count_file=dict(extensions=None, ),
)
outputs = Binarize.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 7dfb9b497ea6dabf9399bd27c03a325fd6d3f709 | import json
from decimal import Decimal
from click.testing import CliRunner
from average_gameplay_time import analyze, average_seconds, filtered_targets
runner = CliRunner()
with open('test_data.json') as infile:
test_data = json.load(infile)
def test_command_exists():
result = runner.invoke(analyze, ['test_data.json'])
assert result.exit_code == 0
def test_command_returns_number():
result = runner.invoke(analyze, ['test_data.json'])
Decimal(result.output)
def test_command_returns_two_digits():
result = runner.invoke(analyze, ['test_data.json'])
(before, after) = result.output.split('.')
assert len(after.strip()) == 2
def test_filtered_targets():
'Verify that the correct number of targets are filtered'
targets = list(filtered_targets(test_data))
assert len(targets) == 2
def test_average_seconds():
'''Verify that the correct targets pass the filter
Undesired targets have been given elapsed times of >= 1 second'''
result = average_seconds(test_data)
assert result == 0.25
|
py | 7dfb9b49b806640e468f11c2b7f72aaa1fe0998e | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../ffta'))
sys.path.insert(0, os.path.abspath('../ffta/acquisition'))
sys.path.insert(0, os.path.abspath('../ffta/analysis'))
sys.path.insert(0, os.path.abspath('../ffta/gkpfm'))
sys.path.insert(0, os.path.abspath('../ffta/hdf_utils'))
sys.path.insert(0, os.path.abspath('../ffta/load'))
sys.path.insert(0, os.path.abspath('../ffta/pixel_utils'))
sys.path.insert(0, os.path.abspath('../ffta/simulation'))
autodoc_mock_imports = ['scipy', 'numpy', 'watchdog', 'igor', 'pandas', 'pywt',
'matplotlib', 'pyUSID', 'numexpr', 'pycroscopy', 'pywavelets',
'h5py', 'sklearn']
# -- Project information -----------------------------------------------------
project = 'FFTA'
copyright = '2020, Raj Giridharagopal'
author = 'Raj Giridharagopal'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] |
py | 7dfb9b49b9d726218dce25593f5355e059f0b4a2 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbUpdateExperimentNameOrDescriptionResponse(BaseType):
def __init__(self, experiment=None):
required = {
"experiment": False,
}
self.experiment = experiment
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .ModeldbExperiment import ModeldbExperiment
tmp = d.get('experiment', None)
if tmp is not None:
d['experiment'] = ModeldbExperiment.from_json(tmp)
return ModeldbUpdateExperimentNameOrDescriptionResponse(**d)
|
py | 7dfb9baef6949884cb074165aa57718ad3440fe5 | from typing import Optional, List
import torch
from torch.nn.utils.rnn import pad_sequence
from src.models.base import NeuralModel
from src.models.convert import get_stack_values, get_stack_features
from src.models.embeddings.embeddings import StackNeuralEmbedding, AnnotationNeuralEmbedding
from src.models.ranking.similarities.base import SimModule
from src.preprocess.token import Token
from src.utils import device
class NeuralFeaturesRanker(NeuralModel):
def __init__(self, stack_emb_module: StackNeuralEmbedding, fixer_emb_module: StackNeuralEmbedding,
annotation_emb_module: AnnotationNeuralEmbedding, sim_module: SimModule):
super(NeuralFeaturesRanker, self).__init__()
self._stack_emb_module = stack_emb_module
self._fixer_emb_module = fixer_emb_module
self._annotation_emb_module = annotation_emb_module
self._sim_module = sim_module
def raw_stack_emb(self, stack: List[Token[int]]) -> torch.Tensor:
stack_len = torch.tensor([len(stack)], dtype=torch.long).to(device)
stack = torch.tensor([get_stack_values(stack)], dtype=torch.long).to(device)
return self._stack_emb_module.emb(stack, stack_len)[0]
def raw_fixers_emb(self, fixers_stacks: List[List[Token[int]]]) -> torch.Tensor:
stacks_lens = torch.tensor([len(stack) for stack in fixers_stacks], dtype=torch.long).to(device)
stacks = pad_sequence([torch.tensor(get_stack_values(stack), dtype=torch.long)
for stack in fixers_stacks], batch_first=True, padding_value=0).to(device)
embs = []
for stack in fixers_stacks:
annotations_lens = torch.tensor([len(token.data.annotations) for token in stack],
dtype=torch.long).to(device)
annotations = pad_sequence([torch.tensor(token.data.annotations, dtype=torch.float)
for token in stack], batch_first=True, padding_value=0).to(device)
annotations_emb = self._annotation_emb_module.emb(annotations,
annotations_lens) # -> (num_annotations, emb_dim)
features = torch.tensor(get_stack_features(stack), dtype=torch.float).to(device)
embs.append(torch.cat((annotations_emb, features), dim=-1))
return self._fixer_emb_module.emb(stacks, stacks_lens, pad_sequence(embs, batch_first=True, padding_value=0))
def raw_sim(self, stack_emb: torch.Tensor, fixers_emb: torch.Tensor,
overall_features: Optional[List[List[float]]] = None) -> torch.Tensor:
if overall_features:
overall_features = torch.tensor(overall_features, dtype=torch.float).to(device)
return self._sim_module.sim(stack_emb, fixers_emb, overall_features)
def raw_predict(self, stack: List[Token[int]], fixers_stacks: List[List[Token[int]]],
overall_features: Optional[List[List[float]]] = None) -> torch.Tensor:
stack_emb = self.raw_stack_emb(stack)
fixers_emb = self.raw_fixers_emb(fixers_stacks)
return self.raw_sim(stack_emb, fixers_emb, overall_features)
|
py | 7dfb9c7ef50b4e24926c229e9467d2785792faf5 | """Tests for project init"""
from subprocess import check_output
from unittest import TestCase, main
import os,shutil
from project.commands import init
class TestInit(TestCase):
pass # for now
class TestMakeProject(TestCase):
# some mock data
@classmethod
def setUpClass(self):
# make temp dir
os.makedirs('../../temp')
# navigate to that dir
os.chdir('../../temp')
empty_dict = {
'owner_name' : '',
'owner_email' : '',
'entry_point' : '',
'description' : '',
'license' : '',
'readme' : '',
}
self.p = init.Project(empty_dict)
@classmethod
def tearDownClass(self):
os.chdir('..')
#remove created directory
shutil.rmtree('temp')
def test_empty_project(self):
# make project
self.p.make()
# check file existence
self.assertTrue('main.py' in os.listdir('.'))
if __name__ == '__main__':
main() |
py | 7dfb9ca58312566f0160b16cd8356f98d523b458 | # Generated by Django 3.2.9 on 2021-11-24 17:24
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('run', '0009_alter_run_start_time'),
]
operations = [
migrations.AddField(
model_name='run',
name='date',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='day run'),
preserve_default=False,
),
]
|
py | 7dfb9cd1fc1685fc133e1d77ba35310e68ce2215 | """Android Calculator App Test: Divide By Zero Test Case"""
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import allure
import pytest
from tests.android_native.calculator_tests.calculator_base_testcase import AndroidCalculatorBaseTestCase
@allure.epic('Android Native App')
@allure.parent_suite('Functional Test')
@allure.suite("Calculator Test Suite")
@allure.sub_suite("Negative Tests")
@allure.feature("Invalid Scenarios")
@allure.story('Division By Zero')
class TestDivideByZeroCase(AndroidCalculatorBaseTestCase):
"""
Android Calculator App Test: Divide By Zero Test Case
Should report error for division by 0
Test the condition where some number is divided by zero
"""
def test_report_error_for_division_by_zero(self):
"""
Should report error for division by 0
1500 / 0 = Error
:return:
"""
allure.dynamic.title("Report error for division by 0 test")
allure.dynamic.severity(allure.severity_level.CRITICAL)
# Perform division by zero
with allure.step("Check error for division by 0: 1500 / 0 -> ERROR"):
self.enter_digit(1500)
self.app.division.tap()
self.enter_digit(0)
self.app.equal.tap()
with allure.step("Verify error message"):
expected = 'Can\'t divide by 0'
assert self.app.screen_result.formatted_text == expected
|
py | 7dfb9dbe75acdd97ba1245e91b3a805775506497 | # Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from taskflow.patterns import graph_flow
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.tasks import database_tasks
from octavia.controller.worker.tasks import lifecycle_tasks
from octavia.controller.worker.tasks import network_tasks
from a10_octavia.common import a10constants
from a10_octavia.controller.worker.tasks import a10_database_tasks
from a10_octavia.controller.worker.tasks import a10_network_tasks
from a10_octavia.controller.worker.tasks import cert_tasks
from a10_octavia.controller.worker.tasks import nat_pool_tasks
from a10_octavia.controller.worker.tasks import virtual_port_tasks
from a10_octavia.controller.worker.tasks import vthunder_tasks
class ListenerFlows(object):
def get_create_listener_flow(self, topology):
"""Flow to create a listener"""
create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
create_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=[constants.LISTENER]))
create_listener_flow.add(vthunder_tasks.VthunderInstanceBusy(
requires=a10constants.COMPUTE_BUSY))
create_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
create_listener_flow.add(vthunder_tasks.GetMasterVThunder(
name=a10constants.GET_MASTER_VTHUNDER,
requires=a10constants.VTHUNDER,
provides=a10constants.VTHUNDER))
create_listener_flow.add(self.handle_ssl_cert_flow(flow_type='create'))
create_listener_flow.add(a10_database_tasks.GetFlavorData(
rebind={a10constants.LB_RESOURCE: constants.LISTENER},
provides=constants.FLAVOR_DATA))
create_listener_flow.add(nat_pool_tasks.NatPoolCreate(
requires=(constants.LOADBALANCER,
a10constants.VTHUNDER, constants.FLAVOR_DATA)))
create_listener_flow.add(virtual_port_tasks.ListenerCreate(
requires=[constants.LOADBALANCER, constants.LISTENER,
a10constants.VTHUNDER, constants.FLAVOR_DATA]))
create_listener_flow.add(a10_network_tasks.UpdateVIP(
requires=constants.LOADBALANCER))
create_listener_flow.add(a10_database_tasks.
MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
create_listener_flow.add(vthunder_tasks.WriteMemory(
requires=a10constants.VTHUNDER))
create_listener_flow.add(a10_database_tasks.SetThunderUpdatedAt(
requires=a10constants.VTHUNDER))
return create_listener_flow
def handle_ssl_cert_flow(self, flow_type='create', listener_name=constants.LISTENER):
if flow_type == 'create':
configure_ssl = self.get_ssl_certificate_create_flow()
elif flow_type == 'update':
configure_ssl = self.get_ssl_certificate_update_flow()
else:
configure_ssl = self.get_ssl_certificate_delete_flow(listener_name)
configure_ssl_flow = graph_flow.Flow(
a10constants.LISTENER_TYPE_DECIDER_FLOW)
check_ssl = cert_tasks.CheckListenerType(
name='check_listener_type_' + listener_name,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name})
configure_ssl_flow.add(check_ssl, configure_ssl)
configure_ssl_flow.link(check_ssl, configure_ssl,
decider=self._check_ssl_data, decider_depth='flow')
return configure_ssl_flow
def _check_ssl_data(self, history):
return list(history.values())[0]
def get_delete_listener_flow(self, topology):
"""Flow to delete a listener"""
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=constants.LISTENER))
delete_listener_flow.add(vthunder_tasks.VthunderInstanceBusy(
requires=a10constants.COMPUTE_BUSY))
delete_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
delete_listener_flow.add(vthunder_tasks.GetMasterVThunder(
name=a10constants.GET_MASTER_VTHUNDER,
requires=a10constants.VTHUNDER,
provides=a10constants.VTHUNDER))
delete_listener_flow.add(self.handle_ssl_cert_flow(flow_type='delete'))
delete_listener_flow.add(virtual_port_tasks.ListenerDelete(
requires=[constants.LOADBALANCER, constants.LISTENER, a10constants.VTHUNDER]))
delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
requires=constants.LOADBALANCER))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
requires=constants.LOADBALANCER))
delete_listener_flow.add(vthunder_tasks.WriteMemory(
requires=a10constants.VTHUNDER))
delete_listener_flow.add(a10_database_tasks.SetThunderUpdatedAt(
requires=a10constants.VTHUNDER))
return delete_listener_flow
def get_cascade_delete_listener_internal_flow(self, listener_name, compute_flag):
"""Create a flow to delete a listener
(will skip deletion on the amp and marking LB active)
:returns: The flow for deleting a listener
"""
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
delete_listener_flow.add(self.handle_ssl_cert_flow(
flow_type='delete', listener_name=listener_name))
if compute_flag:
delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
name='delete_update_vip_' + listener_name,
requires=constants.LOADBALANCER))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
name='delete_listener_in_db_' + listener_name,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name}))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
name='decrement_listener_quota_' + listener_name,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name}))
return delete_listener_flow
def get_delete_rack_listener_flow(self):
"""Flow to delete a rack listener """
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=constants.LISTENER))
delete_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
delete_listener_flow.add(self.handle_ssl_cert_flow(flow_type='delete'))
delete_listener_flow.add(virtual_port_tasks.ListenerDelete(
requires=[constants.LOADBALANCER, constants.LISTENER, a10constants.VTHUNDER]))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
requires=constants.LISTENER))
delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
requires=constants.LOADBALANCER))
delete_listener_flow.add(vthunder_tasks.WriteMemory(
requires=a10constants.VTHUNDER))
delete_listener_flow.add(a10_database_tasks.SetThunderUpdatedAt(
requires=a10constants.VTHUNDER))
return delete_listener_flow
def get_update_listener_flow(self, topology):
"""Flow to update a listener"""
update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW)
update_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=[constants.LISTENER]))
update_listener_flow.add(vthunder_tasks.VthunderInstanceBusy(
requires=a10constants.COMPUTE_BUSY))
update_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
update_listener_flow.add(vthunder_tasks.GetMasterVThunder(
name=a10constants.GET_MASTER_VTHUNDER,
requires=a10constants.VTHUNDER,
provides=a10constants.VTHUNDER))
update_listener_flow.add(self.handle_ssl_cert_flow(flow_type='update'))
update_listener_flow.add(a10_database_tasks.GetFlavorData(
rebind={a10constants.LB_RESOURCE: constants.LISTENER},
provides=constants.FLAVOR_DATA))
update_listener_flow.add(virtual_port_tasks.ListenerUpdate(
requires=[constants.LOADBALANCER, constants.LISTENER,
a10constants.VTHUNDER, constants.FLAVOR_DATA, constants.UPDATE_DICT]))
update_listener_flow.add(database_tasks.UpdateListenerInDB(
requires=[constants.LISTENER, constants.UPDATE_DICT]))
update_listener_flow.add(a10_database_tasks.
MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
update_listener_flow.add(vthunder_tasks.WriteMemory(
requires=a10constants.VTHUNDER))
update_listener_flow.add(a10_database_tasks.SetThunderUpdatedAt(
requires=a10constants.VTHUNDER))
return update_listener_flow
def get_rack_vthunder_create_listener_flow(self, project_id):
"""Create a flow to create a rack listener"""
create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
create_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
requires=[constants.LISTENER]))
create_listener_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
create_listener_flow.add(self.handle_ssl_cert_flow(flow_type='create'))
create_listener_flow.add(a10_database_tasks.GetFlavorData(
rebind={a10constants.LB_RESOURCE: constants.LISTENER},
provides=constants.FLAVOR_DATA))
create_listener_flow.add(nat_pool_tasks.NatPoolCreate(
requires=(constants.LOADBALANCER,
a10constants.VTHUNDER, constants.FLAVOR_DATA)))
create_listener_flow.add(virtual_port_tasks.ListenerCreate(
requires=[constants.LOADBALANCER, constants.LISTENER,
a10constants.VTHUNDER, constants.FLAVOR_DATA]))
create_listener_flow.add(a10_database_tasks.
MarkLBAndListenerActiveInDB(
requires=[constants.LOADBALANCER,
constants.LISTENER]))
create_listener_flow.add(vthunder_tasks.WriteMemory(
requires=a10constants.VTHUNDER))
create_listener_flow.add(a10_database_tasks.SetThunderUpdatedAt(
requires=a10constants.VTHUNDER))
return create_listener_flow
def get_ssl_certificate_create_flow(self):
create_ssl_cert_flow = linear_flow.Flow(
a10constants.CREATE_SSL_CERT_FLOW)
create_ssl_cert_flow.add(cert_tasks.GetSSLCertData(
requires=[constants.LOADBALANCER, constants.LISTENER],
provides=a10constants.CERT_DATA))
create_ssl_cert_flow.add(cert_tasks.SSLCertCreate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
create_ssl_cert_flow.add(cert_tasks.SSLKeyCreate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
create_ssl_cert_flow.add(cert_tasks.ClientSSLTemplateCreate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
return create_ssl_cert_flow
def get_ssl_certificate_delete_flow(self, listener):
delete_ssl_cert_flow = linear_flow.Flow(
a10constants.DELETE_SSL_CERT_FLOW)
delete_ssl_cert_flow.add(cert_tasks.GetSSLCertData(
name='get_ssl_cert_data_' + listener,
requires=[constants.LOADBALANCER, constants.LISTENER],
rebind={constants.LISTENER: listener},
provides=a10constants.CERT_DATA))
delete_ssl_cert_flow.add(cert_tasks.ClientSSLTemplateDelete(
name='client_ssl_template_delete_' + listener,
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
delete_ssl_cert_flow.add(cert_tasks.SSLCertDelete(
name='ssl_cert_delete_' + listener,
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
delete_ssl_cert_flow.add(cert_tasks.SSLKeyDelete(
name='ssl_key_delete_' + listener,
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
return delete_ssl_cert_flow
def get_ssl_certificate_update_flow(self):
update_ssl_cert_flow = linear_flow.Flow(
a10constants.DELETE_SSL_CERT_FLOW)
update_ssl_cert_flow.add(cert_tasks.GetSSLCertData(
requires=[constants.LOADBALANCER, constants.LISTENER],
provides=a10constants.CERT_DATA))
update_ssl_cert_flow.add(cert_tasks.SSLCertUpdate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
update_ssl_cert_flow.add(cert_tasks.SSLKeyUpdate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
update_ssl_cert_flow.add(cert_tasks.ClientSSLTemplateUpdate(
requires=[a10constants.CERT_DATA, a10constants.VTHUNDER]))
return update_ssl_cert_flow
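# Hedged usage sketch (the flows object name and store keys below are
# assumptions, not taken from this file): these builders only assemble
# taskflow graphs; a taskflow engine runs them with a store that supplies the
# inputs the first tasks require.
#
#   from taskflow import engines
#   flow = listener_flows.get_rack_vthunder_create_listener_flow(project_id)
#   engines.run(flow, store={'loadbalancer': lb, 'listener': listener})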
|
py | 7dfb9dc23508eafe8c8e1af955ff5262dff827fe | from tensorflow.keras import callbacks
from tensorflow.keras import layers, regularizers
from tensorflow.keras import optimizers, metrics, losses
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow import keras
import tensorflow as tf
l2 = regularizers.l2
w_decay = 1e-3  # L2 weight decay; values previously tried: 0.0, 2e-4
K.clear_session()
# weight_init = tf.initializers.RandomNormal(mean=0.,stddev=0.01)
# weight_init = tf.initializers.glorot_normal()
weight_init = tf.initializers.glorot_uniform()
class _DenseLayer(layers.Layer):
"""_DenseBlock model.
Arguments:
out_features: number of output features
"""
def __init__(self, out_features,**kwargs):
super(_DenseLayer, self).__init__(**kwargs)
k_reg = None if w_decay is None else l2(w_decay)
self.layers = []
self.layers.append(tf.keras.Sequential(
[
layers.ReLU(),
layers.Conv2D(
filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
use_bias=True, kernel_initializer=weight_init,
kernel_regularizer=k_reg),
layers.BatchNormalization(),
layers.ReLU(),
layers.Conv2D(
filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
use_bias=True, kernel_initializer=weight_init,
kernel_regularizer=k_reg),
layers.BatchNormalization(),
            ]))  # the first ReLU may not be strictly needed
def call(self, inputs):
x1, x2 = tuple(inputs)
new_features = x1
for layer in self.layers:
new_features = layer(new_features)
return 0.5 * (new_features + x2), x2
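# Minimal usage sketch for the layer above (shapes are illustrative
# assumptions): _DenseLayer consumes a (features, skip) pair and returns the
# averaged result together with the untouched skip tensor, so layers can be
# chained without re-wiring the skip connection.
#
#   layer = _DenseLayer(out_features=256)
#   x1 = tf.zeros([1, 44, 44, 256])       # running features
#   x2 = tf.zeros([1, 44, 44, 256])       # skip connection, passed through
#   new_x1, same_x2 = layer([x1, x2])     # new_x1 = 0.5 * (convs(x1) + x2)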
class _DenseBlock(layers.Layer):
"""DenseBlock layer.
Arguments:
num_layers: number of _DenseLayer's per block
out_features: number of output features
"""
def __init__(self,
num_layers,
out_features,**kwargs):
super(_DenseBlock, self).__init__(**kwargs)
self.layers = [_DenseLayer(out_features) for i in range(num_layers)]
def call(self, inputs):
for layer in self.layers:
inputs = layer(inputs)
return inputs
class UpConvBlock(layers.Layer):
"""UpConvDeconvBlock layer.
Arguments:
up_scale: int
"""
def __init__(self, up_scale,**kwargs):
super(UpConvBlock, self).__init__(**kwargs)
constant_features = 16
k_reg = None if w_decay is None else l2(w_decay)
features = []
total_up_scale = 2 ** up_scale
for i in range(up_scale):
out_features = 1 if i == up_scale-1 else constant_features
if i==up_scale-1:
features.append(layers.Conv2D(
filters=out_features, kernel_size=(1,1), strides=(1,1), padding='same',
activation='relu', kernel_initializer=tf.initializers.TruncatedNormal(stddev=0.1),
kernel_regularizer=k_reg,use_bias=True)) #tf.initializers.TruncatedNormal(mean=0.)
features.append(layers.Conv2DTranspose(
out_features, kernel_size=(total_up_scale,total_up_scale),
strides=(2,2), padding='same',
kernel_initializer=tf.initializers.TruncatedNormal(stddev=0.1),
kernel_regularizer=k_reg,use_bias=True)) # stddev=0.1
else:
features.append(layers.Conv2D(
filters=out_features, kernel_size=(1,1), strides=(1,1), padding='same',
activation='relu',kernel_initializer=weight_init,
kernel_regularizer=k_reg,use_bias=True))
features.append(layers.Conv2DTranspose(
out_features, kernel_size=(total_up_scale,total_up_scale),
strides=(2,2), padding='same', use_bias=True,
kernel_initializer=weight_init, kernel_regularizer=k_reg))
self.features = keras.Sequential(features)
def call(self, inputs):
return self.features(inputs)
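# Usage sketch (the input shape is an illustrative assumption): UpConvBlock(k)
# chains k pairs of 1x1 Conv2D + stride-2 Conv2DTranspose, so the spatial size
# grows by 2**k and the last pair reduces the channels to a single edge map.
#
#   up = UpConvBlock(3)
#   y = up(tf.zeros([1, 40, 40, 256]))    # -> shape [1, 320, 320, 1]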
class SingleConvBlock(layers.Layer):
"""SingleConvBlock layer.
Arguments:
out_features: number of output features
stride: stride per convolution
"""
def __init__(self, out_features, k_size=(1,1),stride=(1,1),
use_bs=False, use_act=False,w_init=None,**kwargs): # bias_init=tf.constant_initializer(0.0)
super(SingleConvBlock, self).__init__(**kwargs)
self.use_bn = use_bs
self.use_act = use_act
k_reg = None if w_decay is None else l2(w_decay)
self.conv = layers.Conv2D(
filters=out_features, kernel_size=k_size, strides=stride,
padding='same',kernel_initializer=w_init,
kernel_regularizer=k_reg)#, use_bias=True, bias_initializer=bias_init
if self.use_bn:
self.bn = layers.BatchNormalization()
if self.use_act:
self.relu = layers.ReLU()
def call(self, inputs):
        x = self.conv(inputs)
if self.use_bn:
x = self.bn(x)
if self.use_act:
x = self.relu(x)
return x
class DoubleConvBlock(layers.Layer):
"""DoubleConvBlock layer.
Arguments:
mid_features: number of middle features
out_features: number of output features
stride: stride per mid-layer convolution
"""
def __init__(self, mid_features, out_features=None, stride=(1,1),
use_bn=True,use_act=True,**kwargs):
super(DoubleConvBlock, self).__init__(**kwargs)
        self.use_bn = use_bn
        self.use_act = use_act
out_features = mid_features if out_features is None else out_features
k_reg = None if w_decay is None else l2(w_decay)
self.conv1 = layers.Conv2D(
filters=mid_features, kernel_size=(3, 3), strides=stride, padding='same',
use_bias=True, kernel_initializer=weight_init,
kernel_regularizer=k_reg)
self.bn1 = layers.BatchNormalization()
self.conv2 = layers.Conv2D(
filters=out_features, kernel_size=(3, 3), padding='same',strides=(1,1),
use_bias=True, kernel_initializer=weight_init,
kernel_regularizer=k_reg)
self.bn2 = layers.BatchNormalization()
self.relu = layers.ReLU()
def call(self, inputs):
x = self.conv1(inputs)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.use_act:
x = self.relu(x)
return x
class DexiNed(tf.keras.Model):
"""DexiNet model."""
def __init__(self,rgb_mean=None,
**kwargs):
super(DexiNed, self).__init__(**kwargs)
self.rgbn_mean = rgb_mean
self.block_1 = DoubleConvBlock(32, 64, stride=(2,2),use_act=False)
self.block_2 = DoubleConvBlock(128,use_act=False)
self.dblock_3 = _DenseBlock(2, 256)
self.dblock_4 = _DenseBlock(3, 512)
self.dblock_5 = _DenseBlock(3, 512)
self.dblock_6 = _DenseBlock(3, 256)
self.maxpool = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')
# first skip connection
self.side_1 = SingleConvBlock(128,k_size=(1,1),stride=(2,2),use_bs=True,
w_init=weight_init)
self.side_2 = SingleConvBlock(256,k_size=(1,1),stride=(2,2),use_bs=True,
w_init=weight_init)
self.side_3 = SingleConvBlock(512,k_size=(1,1),stride=(2,2),use_bs=True,
w_init=weight_init)
self.side_4 = SingleConvBlock(512,k_size=(1,1),stride=(1,1),use_bs=True,
w_init=weight_init)
# self.side_5 = SingleConvBlock(256,k_size=(1,1),stride=(1,1),use_bs=True,
# w_init=weight_init)
self.pre_dense_2 = SingleConvBlock(256,k_size=(1,1),stride=(2,2),
w_init=weight_init) # use_bn=True
self.pre_dense_3 = SingleConvBlock(256,k_size=(1,1),stride=(1,1),use_bs=True,
w_init=weight_init)
self.pre_dense_4 = SingleConvBlock(512,k_size=(1,1),stride=(1,1),use_bs=True,
w_init=weight_init)
self.pre_dense_5_0 = SingleConvBlock(512, k_size=(1,1),stride=(2,2),
w_init=weight_init) # use_bn=True
self.pre_dense_5 = SingleConvBlock(512,k_size=(1,1),stride=(1,1),use_bs=True,
w_init=weight_init)
self.pre_dense_6 = SingleConvBlock(256,k_size=(1,1),stride=(1,1),use_bs=True,
w_init=weight_init)
self.up_block_1 = UpConvBlock(1)
self.up_block_2 = UpConvBlock(1)
self.up_block_3 = UpConvBlock(2)
self.up_block_4 = UpConvBlock(3)
self.up_block_5 = UpConvBlock(4)
self.up_block_6 = UpConvBlock(4)
self.block_cat = SingleConvBlock(
1,k_size=(1,1),stride=(1,1),
w_init=tf.constant_initializer(1/5))
def slice(self, tensor, slice_shape):
height, width = slice_shape
return tensor[..., :height, :width]
def call(self, x):
# Block 1
x = x-self.rgbn_mean[:-1]
block_1 = self.block_1(x)
block_1_side = self.side_1(block_1)
# Block 2
block_2 = self.block_2(block_1)
        block_2_down = self.maxpool(block_2)  # the key for the second skip connection
        block_2_add = block_2_down + block_1_side
        block_2_side = self.side_2(block_2_add)
# Block 3
block_3_pre_dense = self.pre_dense_3(block_2_down)
block_3, _ = self.dblock_3([block_2_add, block_3_pre_dense])
block_3_down = self.maxpool(block_3)
block_3_add = block_3_down + block_2_side
block_3_side = self.side_3(block_3_add)
# Block 4
block_4_pre_dense_256 = self.pre_dense_2(block_2_down)
block_4_pre_dense = self.pre_dense_4(block_4_pre_dense_256 + block_3_down)
block_4, _ = self.dblock_4([block_3_add, block_4_pre_dense])
block_4_down = self.maxpool(block_4)
block_4_add = block_4_down + block_3_side
block_4_side = self.side_4(block_4_add)
# Block 5
block_5_pre_dense_512 = self.pre_dense_5_0(block_4_pre_dense_256)
        block_5_pre_dense = self.pre_dense_5(block_5_pre_dense_512 + block_4_down)
block_5, _ = self.dblock_5([block_4_add, block_5_pre_dense])
block_5_add = block_5 + block_4_side
# Block 6
block_6_pre_dense = self.pre_dense_6(block_5)
block_6, _ = self.dblock_6([block_5_add, block_6_pre_dense])
# upsampling blocks
height, width = x.shape[1:3]
slice_shape = (height, width)
out_1 = self.up_block_1(block_1) # self.slice(, slice_shape)
out_2 = self.up_block_2(block_2)
out_3 = self.up_block_3(block_3)
out_4 = self.up_block_4(block_4)
out_5 = self.up_block_5(block_5)
out_6 = self.up_block_6(block_6)
results = [out_1, out_2, out_3, out_4, out_5, out_6]
# concatenate multiscale outputs
block_cat = tf.concat(results, 3) # BxHxWX6
block_cat = self.block_cat(block_cat) # BxHxWX1
results.append(block_cat)
return results
def weighted_cross_entropy_loss(input, label):
y = tf.cast(label,dtype=tf.float32)
negatives = tf.math.reduce_sum(1.-y)
positives = tf.math.reduce_sum(y)
beta = negatives/(negatives + positives)
pos_w = beta/(1-beta)
cost = tf.nn.weighted_cross_entropy_with_logits(
labels=label, logits=input, pos_weight=pos_w, name=None)
cost = tf.reduce_sum(cost*(1-beta))
return tf.where(tf.equal(positives, 0.0), 0.0, cost)
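# Hedged worked example for the class balancing above (numbers are made up):
# with 90 negative and 10 positive pixels, beta = 90/100 = 0.9 and
# pos_weight = beta / (1 - beta) = 9, so positive pixels count 9x inside
# weighted_cross_entropy_with_logits and the summed cost is then scaled by
# (1 - beta) = 0.1; an all-negative label map returns 0 via the tf.where above.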
def pre_process_binary_cross_entropy(bc_loss,input, label,arg, use_tf_loss=False):
# preprocess data
y = label
loss = 0
w_loss=1.0
preds = []
for tmp_p in input:
# tmp_p = input[i]
# loss processing
tmp_y = tf.cast(y, dtype=tf.float32)
mask = tf.dtypes.cast(tmp_y > 0., tf.float32)
        b, h, w, c = mask.get_shape()
positives = tf.math.reduce_sum(mask, axis=[1, 2, 3], keepdims=True)
# positives = tf.math.reduce_sum(mask)
negatives = h*w*c-positives
# negatives = tf.math.reduce_sum(1. - tmp_y)
beta2 = positives / (negatives + positives) # negatives in hed
beta = negatives/ (positives + negatives) # positives in hed
# pos_w = beta/(1-beta)
pos_w = tf.where(tf.greater(y, 0.0), beta, beta2)
# pos_w = tf.where(tf.equal(mask, 0.0), beta, beta2)
logits = tf.sigmoid(tmp_p)
l_cost = bc_loss(y_true=tmp_y, y_pred=logits,
sample_weight=pos_w)
# cost = tf.math.reduce_mean(cost * (1 - beta))
# l_cost= tf.where(tf.equal(positives, 0.0), 0.0, cost)
preds.append(logits)
loss += (l_cost*1.0)
# mask[mask != 0] = negatives / (positives + negatives)
# mask[mask == 0] = positives / (positives + negatives)
return preds, loss |
py | 7dfb9ef3ff110db14711fb2caf289a2b305fd83f | # TODO: Implement a python module which scans the snapformats folder,
# and returns a list of available formats and formatting classes.
|
py | 7dfb9f958e80b7f20bb000a27b670ed36624032c | #!/usr/bin/python
"""
The MIT License (MIT)
Copyright (c) 2016 Steffen Karlsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Steffen Karlsson'
__version__ = '0.1-dev'
__licence__ = 'MIT'
from colorama import init, Fore
from requests import get, codes
from argparse import ArgumentParser
from json import loads
from collections import defaultdict
from urllib import quote
from bs4 import BeautifulSoup
from tabulate import tabulate
PAGE = "http://www.thesaurus.com/browse/%s?s=t"
COLORS = [Fore.RED, Fore.YELLOW, Fore.GREEN]
class NoResultException(Exception):
pass
class MisspelledQueryException(Exception):
def __init__(self, message):
super(MisspelledQueryException, self).__init__(message)
def is_okay(res):
return res.status_code == codes.ok
def search(query):
res = get(PAGE % quote(query))
html = res.text
if not is_okay(res):
bs = BeautifulSoup(html, "lxml")
misspelled = bs.find('div', class_="misspelled")
if misspelled is not None:
correction_header = misspelled.find('div', class_="heading-row")
if not correction_header:
raise NoResultException()
correction = str(correction_header.find('a').text).strip()
raise MisspelledQueryException(correction)
return html
def find_synonyms(html):
bs = BeautifulSoup(html, "lxml")
if bs.find('div', class_="ermsg") is not None or bs.find('li', id="words-gallery-no-results") is not None:
raise NoResultException()
mask = bs.find('div', class_="mask")
synonym_groups = {}
for tab in mask.find_all('a', class_="pos-tab"):
synonym = tab.find('strong', class_="ttl").text
word_type = tab.find('em', class_="txt").text
synonym_groups["%s %s" % (word_type, synonym)] = defaultdict(list)
relevancy_lists = bs.find_all('div', class_="relevancy-list")
if relevancy_lists is None or not relevancy_lists or len(relevancy_lists) != len(synonym_groups):
raise NoResultException()
for idx, relevancy in enumerate(relevancy_lists):
for common_word in relevancy.find_all('a'):
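            # Each synonym link carries JSON in its data-category attribute,
            # e.g. {"name": "relevant-3"} (assumed page markup); the trailing
            # number is the relevancy bucket later mapped onto COLORS.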
category = int(loads(common_word["data-category"])["name"].split("relevant-")[1])
synonym = common_word.find('span', class_="text").text
synonym_groups[synonym_groups.keys()[idx]][category].append(synonym)
return synonym_groups
def get_arguments():
parser = ArgumentParser(description="A command line tool for thesaurus.com")
parser.add_argument('q', help="Query to search for at thesaurus")
args = parser.parse_args()
if args.q is None:
exit(1)
return args.q
def present_synonyms(synonym_groups, query):
table = defaultdict(list)
for sg in sorted(synonym_groups.keys()):
synonyms = synonym_groups[sg]
table[sg] = [COLORS[category - 1] + word
for category in sorted(synonyms.keys(), reverse=True)
for word in sorted(synonyms[category])]
table[sg].append(Fore.WHITE + "")
print ">> Results from searching: %s, where the first row denotes the context of where the synonyms are used.\n" % query
print tabulate(table, tablefmt="rst", headers="keys")
if __name__ == '__main__':
init()
query = get_arguments()
try:
html = search(query)
synonym_groups = find_synonyms(html)
present_synonyms(synonym_groups, query)
except NoResultException:
print "No matching results for query: %s" % query
except MisspelledQueryException, e:
print "Did you mean %s instead of %s?, otherwise no matching results." % (e.message, query)
|
py | 7dfb9fb88f9de0dfb6ddb74e5d7946d0ced594a9 | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple(
'_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
    - During the replication, as the data parallel will trigger a callback for each module, all slave devices should
    call `register(id)` and obtain a `SlavePipe` to communicate with the master.
- During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine the message to be
    passed back to each slave device.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
        Register a slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback will be invoked to compute the message to be sent back to each device
(including the master device).
Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
|
py | 7dfba0521a29b6c251fb7da9cbb6b4774e790841 | #!/usr/bin/env python
from flask import Flask, session, request, render_template, url_for, redirect
import sys, re, macAdder, myldap, os
from config import secret_key
app = Flask(__name__)
app.secret_key = secret_key
@app.route("/", methods=["GET", "POST"])
def index():
if not session.get("logged_in"):
return redirect(url_for('login'))
if request.method == "POST":
macs = request.form['macs']
if macs == '':
return render_template("index.html")
macs = str(macs).lower().splitlines()
for mac in macs:
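            # Accepts 12 lowercase hex digits, either bare or with a single
            # consistent ':' separator (captured once, re-used via \1).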
if not re.match("[0-9a-f]{2}([:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", mac):
return render_template("index.html", result="MAC addresses provided are not in specified format")
try:
if request.form['action'] == 'Add':
macAdder.add_macs(macs)
return render_template("index.html", result="Successfully added all MAC addresses to both whitelists!")
elif request.form['action'] == 'Remove':
macAdder.remove_macs(macs)
return render_template("index.html", result="Successfully removed all MAC addresses from both whitelists!")
else:
return render_template("index.html", result="Please choose whether to add or remove the MAC addresses")
except:
e = sys.exc_info()
return render_template("index.html", result="Error adding MAC addresses", error=e)
return render_template("index.html")
@app.route('/login', methods=['GET', 'POST'])
def login():
if session.get("logged_in"):
return redirect(url_for('index'))
if request.method == "POST":
result = auth(request.form["username"], request.form["password"])
if result[0]:
session["logged_in"] = True
return redirect(url_for('index'))
return render_template("login.html", result=result[1])
return render_template("login.html")
@app.route("/logout")
def logout():
session['logged_in'] = False
return redirect(url_for('index'))
# implement your own authentication. return (bool: success, str: message)
def auth(username, password):
# for example, use LDAP authentication
return myldap.auth(username, password)
|
py | 7dfba089de0900f3cc21ff1b70a8ec65956881f8 | from pkonfig.base import (
Field,
TypeMapper,
BaseConfig,
BaseOuterConfig,
)
from pkonfig.storage import *
from pkonfig.fields import (
Bool,
Int,
Float,
DecimalField,
Str,
Byte,
ByteArray,
PathField,
File,
Folder,
EnumField,
LogLevel,
Choice,
)
from pkonfig.config import DefaultMapper, Config, EmbeddedConfig
|
py | 7dfba1eeea39772d676eed47856e849ca9e531ba | #!/usr/bin/env python2
import termios
import select
import socket
import os
import fcntl
import argparse
from sctp import *
class PTY:
def __init__(self, slave=0, pid=os.getpid()):
# apparently python GC's modules before class instances so, here
# we have some hax to ensure we can restore the terminal state.
self.termios, self.fcntl = termios, fcntl
# open our controlling PTY
self.pty = open(os.readlink("/proc/%d/fd/%d" % (pid, slave)), "rb+")
# store our old termios settings so we can restore after
# we are finished
self.oldtermios = termios.tcgetattr(self.pty)
# get the current settings se we can modify them
newattr = termios.tcgetattr(self.pty)
# set the terminal to uncanonical mode and turn off
# input echo.
newattr[3] &= ~termios.ICANON & ~termios.ECHO
# don't handle ^C / ^Z / ^\
newattr[6][termios.VINTR] = '\x00'
newattr[6][termios.VQUIT] = '\x00'
newattr[6][termios.VSUSP] = '\x00'
# set our new attributes
termios.tcsetattr(self.pty, termios.TCSADRAIN, newattr)
# store the old fcntl flags
self.oldflags = fcntl.fcntl(self.pty, fcntl.F_GETFL)
# fcntl.fcntl(self.pty, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
# make the PTY non-blocking
fcntl.fcntl(self.pty, fcntl.F_SETFL, self.oldflags | os.O_NONBLOCK)
def read(self, size=8192):
return self.pty.read(size)
def write(self, data):
ret = self.pty.write(data)
self.pty.flush()
return ret
def fileno(self):
return self.pty.fileno()
def __del__(self):
# restore the terminal settings on deletion
self.termios.tcsetattr(self.pty, self.termios.TCSAFLUSH, self.oldtermios)
self.fcntl.fcntl(self.pty, self.fcntl.F_SETFL, self.oldflags)
class Shell:
def __init__(self, addr, bind=True):
self.bind = bind
self.addr = addr
if self.bind:
self.sock = sctpsocket_tcp(socket.AF_INET)
self.sock.bind(self.addr)
self.sock.listen(5)
def handle(self, addr=None):
addr = addr or self.addr
if self.bind:
sock, addr = self.sock.accept()
else:
sock = sctpsocket_tcp(socket.AF_INET)
sock.connect(addr)
# create our PTY
pty = PTY()
# input buffers for the fd's
buffers = [ [ sock, [] ], [ pty, [] ] ]
def buffer_index(fd):
for index, buffer in enumerate(buffers):
if buffer[0] == fd:
return index
readable_fds = [ sock, pty ]
data = " "
# keep going until something deds
while data:
# if any of the fd's need to be written to, add them to the
# writable_fds
writable_fds = []
for buffer in buffers:
if buffer[1]:
writable_fds.append(buffer[0])
r, w, x = select.select(readable_fds, writable_fds, [])
# read from the fd's and store their input in the other fd's buffer
for fd in r:
buffer = buffers[buffer_index(fd) ^ 1][1]
if hasattr(fd, "read"):
data = fd.read(8192)
else:
data = fd.recv(8192)
if data:
buffer.append(data)
# send data from each buffer onto the proper FD
for fd in w:
buffer = buffers[buffer_index(fd)][1]
data = buffer[0]
if hasattr(fd, "write"):
fd.write(data)
else:
fd.send(data)
buffer.remove(data)
# close the socket
sock.close()
if __name__ == "__main__":
# I could do this validation with regex.. but meh.
def AddressString(value):
address = value.split(":")
if len(address) != 2:
raise argparse.ArgumentTypeError("Address must be in format IP:Port.")
if len(address[0].split(".")) != 4:
raise argparse.ArgumentTypeError("Invalid IP length.")
for octet in address[0].split("."):
try:
if int(octet) > 255 or int(octet) < 0:
raise argparse.ArgumentTypeError("Invalid octet in address.")
except ValueError:
raise argparse.ArgumentTypeError("Invalid octet in address.")
try:
address[1] = int(address[1])
if address[1] < 0 or address[1] > 65535:
raise argparse.ArgumentTypeError("Invalid port number")
except ValueError:
raise argparse.ArgumentTypeError("Invalid port number.")
return tuple(address)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-b", "--bind", help="Reverse shell handler.",
action="store_true")
group.add_argument("-c", "--connect", help="Bind shell handler.",
action="store_true")
parser.add_argument("address", type=AddressString,
help="IP address/port to bind/connect to.")
args = parser.parse_args()
s = Shell(args.address, bind=args.bind)
s.handle()
|
py | 7dfba244804cf02763cf301d676af0ed73c561fb | """
inputs
number_a-->a-->int
number_b-->b-->int
number_c-->c-->int
number_d-->d-->int
outputs
result-->r-->int
"""
# inputs
a=int(input("Enter a "))
b=int(input("Enter b "))
c=int(input("Enter c "))
d=int(input("Enter d "))
# computation
if d==0:
    r=(a-c)**2
elif d>0:
    r=((a-b)**3)/d
# outputs
print("The result is ",r) |
py | 7dfba2a109561f996de79a1b10886662b53f9944 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for the DeploymentManager CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA,
base.ReleaseTrack.BETA,
base.ReleaseTrack.ALPHA)
class DmV2(base.Group):
"""Manage deployments of cloud resources.
The {command} command group lets you manage the deployment of Google Cloud
Platform resources using Google Cloud Deployment Manager.
Google Cloud Deployment Manager allows you to specify all the resources needed
for your application in a declarative format using YAML. You can also use
Python or Jinja2 templates to parameterize the configuration and allow reuse
of common deployment paradigms such as a load balanced, auto-scaled instance
group.
More information on Cloud Deployment Manager can be found here:
https://cloud.google.com/deployment-manager and detailed documentation can be
found here: https://cloud.google.com/deployment-manager/docs/
"""
category = base.GCLOUD_MANAGEMENT_TOOLS_CATEGORY
def Filter(self, context, args):
del context, args
base.DisableUserProjectQuota()
|
py | 7dfba333acb6903dcc3934c2e9af0df9963c607d | # flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SliderItemCategory'
db.create_table('hero_slider_slideritemcategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=128)),
))
db.send_create_signal('hero_slider', ['SliderItemCategory'])
# Adding model 'SliderItemCategoryTitle'
db.create_table('hero_slider_slideritemcategorytitle', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slider_item_category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hero_slider.SliderItemCategory'])),
('language', self.gf('django.db.models.fields.CharField')(max_length=5)),
))
db.send_create_signal('hero_slider', ['SliderItemCategoryTitle'])
# Changing field 'SliderItemTitle.language'
db.alter_column('hero_slider_slideritemtitle', 'language', self.gf('django.db.models.fields.CharField')(max_length=5))
def backwards(self, orm):
# Deleting model 'SliderItemCategory'
db.delete_table('hero_slider_slideritemcategory')
# Deleting model 'SliderItemCategoryTitle'
db.delete_table('hero_slider_slideritemcategorytitle')
# Changing field 'SliderItemTitle.language'
db.alter_column('hero_slider_slideritemtitle', 'language', self.gf('django.db.models.fields.CharField')(max_length=2))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'hero_slider.slideritem': {
'Meta': {'object_name': 'SliderItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'hero_slider.slideritemcategory': {
'Meta': {'object_name': 'SliderItemCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
'hero_slider.slideritemcategorytitle': {
'Meta': {'object_name': 'SliderItemCategoryTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slider_item_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hero_slider.SliderItemCategory']"})
},
'hero_slider.slideritemtitle': {
'Meta': {'object_name': 'SliderItemTitle'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'slider_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hero_slider.SliderItem']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
}
}
complete_apps = ['hero_slider']
|
py | 7dfba3a6b1e989f31a0ecfbe0730a2c440f3a982 | import hcipy as hp
from .base import hcipyComponent
__all__ = ['IdealCoronagraph']
class IdealCoronagraph(hcipyComponent):
'''
Ideal coronagraph
'''
def __init__(self, aperture_function, order=2):
self.aperture = aperture_function
self.order = order
@property
def input_grid(self):
return self.grid
@property
def input_grid_type(self):
return 'pupil'
@property
def output_grid(self):
return self.grid
@property
def output_grid_type(self):
return 'pupil'
def initialise_for(self, prev_component):
self.prev_component = prev_component
self.grid = prev_component.output_grid
self.coronagraph = hp.PerfectCoronagraph(self.aperture(self.grid), self.order)
def forward(self, wf):
return self.coronagraph.forward(wf)
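# Hedged usage sketch (the aperture callable, upstream component and input
# wavefront come from elsewhere in the optical pipeline): initialise_for()
# takes its grid from the previous component before forward() applies the
# perfect coronagraph.
#
#   coro = IdealCoronagraph(aperture_function, order=2)
#   coro.initialise_for(prev_component)   # uses prev_component.output_grid
#   wf_out = coro.forward(wf_in)          # on-axis starlight ideally removed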
|
py | 7dfba3bb446e1ec9f3f20f1d14f548d2f68872e1 | """
ADAPTED FROM __init__.py AT https://github.com/honeycombio/libhoney-py/
"""
from __future__ import annotations
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
import libevent.constants as constants
import libevent.state as state
from libevent.client import Client
from libevent.event import Event
from libevent.handler import Handler
from libevent.stdout_handler import StdoutHandler
"""
Sample usage:
libevent.init()
# ...
evt = libevent.new_event()
evt.add_field(...)
# ...
evt.send()
"""
def init(handlers: Optional[List[Handler]] = None) -> None:
if handlers is None:
handlers = [StdoutHandler()]
state.CLIENT = Client(handlers)
# Set to False to not spam handlers with warnings if we call init late.
state.WARNED_UNINITIALIZED = False
def add_field(name: str, val: Any) -> None:
if state.CLIENT is None:
state.warn_uninitialized()
return
state.CLIENT.add_field(name, val)
def add(data: Dict) -> None:
if state.CLIENT is None:
state.warn_uninitialized()
return
state.CLIENT.add(data)
def new_event(data: Optional[Dict] = None,
              calling_func: Optional[Callable] = None) -> Event:
evt = Event(data=data, client=state.CLIENT)
evt.add_field(constants.TIMESTAMP_KEY, datetime.utcnow())
if calling_func:
evt.add_field(constants.OPERATION_KEY, calling_func.__name__)
return evt
|
py | 7dfba4d7aa0b9ffa0b1fb1b7501b1e23ed019ae8 | import random
import string
from typing import Optional
from application.data.game_state import GameState
from application.data.word_manager import WordManager
class GameManager:
"""
Manages all the games.
"""
def __init__(self, word_manager: WordManager):
self.games = {}
self.word_manager = word_manager
def create_game(self) -> GameState:
"""
Creates a new game.
Returns:
the game state
"""
game_name = self._create_game_name()
while game_name in self.games:
game_name = self._create_game_name()
return self.create_game_for_name(game_name)
def create_game_for_name(self, game_name: str) -> GameState:
"""
Creates a new game with the given game name.
Returns:
the game state
"""
game_state = GameState(game_name, self.word_manager)
self.games[game_name] = game_state
return game_state
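    # Hedged usage sketch (WordManager construction happens elsewhere):
    #
    #   manager = GameManager(word_manager)
    #   state = manager.create_game_for_name("ABCD")
    #   assert manager.get_game_state("abcd") is state   # lookup upper-cases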
def get_game_state(self, game_name: str) -> Optional[GameState]:
"""
Returns the game state for the given game name if one exists.
Args:
game_name: the game name
Returns:
the game state if one exists
"""
game_name = game_name.upper()
return self.games.get(game_name, None)
@staticmethod
def _create_game_name() -> str:
game_name = ""
for i in range(0, 4):
game_name += random.choice(string.ascii_uppercase)
return game_name
def _expire_game(self):
# TODO we should expire games so that they don't live in memory forever.
pass
|
py | 7dfba50266f8d07ab88cf9c8547c8a1758078348 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .alias import *
from .event_source_mapping import *
from .function import *
from .function_event_invoke_config import *
from .get_alias import *
from .get_function import *
from .get_invocation import *
from .get_layer_version import *
from .layer_version import *
from .permission import *
from .provisioned_concurrency_config import *
from ._inputs import *
from . import outputs
|
py | 7dfba529cfce1b0565026150d7a7d4bc06f9fda7 | #
# Signature/DSS.py : DSS.py
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Digital Signature Standard (DSS), as specified in `FIPS PUB 186-3`__.
A sender signs a message in the following way:
>>> from Cryptodome.Hash import SHA256
>>> from Cryptodome.PublicKey import ECC
>>> from Cryptodome.Signature import DSS
>>>
>>> message = b'I give my permission to order #4355'
>>> key = ECC.import_key(open('privkey.der').read())
>>> h = SHA256.new(message)
>>> signer = DSS.new(key, 'fips-186-3')
>>> signature = signer.sign(h)
The receiver can verify authenticity of the message:
>>> key = ECC.import_key(open('pubkey.der').read())
>>> h = SHA256.new(received_message)
>>> verifier = DSS.new(key, 'fips-186-3')
>>> try:
>>> verifier.verify(h, signature):
>>> print "The message is authentic."
>>> except ValueError:
>>> print "The message is not authentic."
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
"""
__all__ = ['new', 'DssSigScheme']
from Cryptodome.Util.py3compat import bchr, b
from Cryptodome.Util.asn1 import DerSequence
from Cryptodome.Util.number import long_to_bytes
from Cryptodome.Math.Numbers import Integer
from Cryptodome.Hash import HMAC
from Cryptodome.PublicKey.ECC import _curve, EccKey
class DssSigScheme(object):
"""This signature scheme can perform DSS signature or verification.
:undocumented: __init__
"""
def __init__(self, key, encoding, order):
"""Create a new Digital Signature Standard (DSS) object.
Do not instantiate this object directly,
use `Cryptodome.Signature.DSS.new` instead.
"""
self._key = key
self._encoding = encoding
self._order = order
self._order_bits = self._order.size_in_bits()
self._order_bytes = (self._order_bits - 1) // 8 + 1
def can_sign(self):
"""Return True if this signature object can be used
for signing messages."""
return self._key.has_private()
def _compute_nonce(self, msg_hash):
raise NotImplementedError("To be provided by subclasses")
def _valid_hash(self, msg_hash):
raise NotImplementedError("To be provided by subclasses")
def sign(self, msg_hash):
"""Produce the DSS signature of a message.
:Parameters:
msg_hash : hash object
The hash that was carried out over the message.
The object belongs to the `Cryptodome.Hash` package.
Under mode *'fips-186-3'*, the hash must be a FIPS
approved secure hash (SHA-1 or a member of the SHA-2 family),
of cryptographic strength appropriate for the DSA key.
For instance, a 3072/256 DSA key can only be used
in combination with SHA-512.
:Return: The signature encoded as a byte string.
:Raise ValueError:
If the hash algorithm is incompatible to the DSA key.
:Raise TypeError:
If the DSA key has no private half.
"""
if not self._valid_hash(msg_hash):
raise ValueError("Hash is not sufficiently strong")
# Generate the nonce k (critical!)
nonce = self._compute_nonce(msg_hash)
# Perform signature using the raw API
z = Integer.from_bytes(msg_hash.digest()[:self._order_bytes])
sig_pair = self._key._sign(z, nonce)
# Encode the signature into a single byte string
if self._encoding == 'binary':
output = b("").join([long_to_bytes(x, self._order_bytes)
for x in sig_pair])
else:
# Dss-sig ::= SEQUENCE {
# r OCTET STRING,
# s OCTET STRING
# }
output = DerSequence(sig_pair).encode()
return output
def verify(self, msg_hash, signature):
"""Verify that a certain DSS signature is authentic.
This function checks if the party holding the private half of the key
really signed the message.
:Parameters:
msg_hash : hash object
The hash that was carried out over the message.
This is an object belonging to the `Cryptodome.Hash` module.
Under mode *'fips-186-3'*, the hash must be a FIPS
approved secure hash (SHA-1 or a member of the SHA-2 family),
of cryptographic strength appropriate for the DSA key.
For instance, a 3072/256 DSA key can only be used in
combination with SHA-512.
signature : byte string
The signature that needs to be validated.
:Raise ValueError:
If the signature is not authentic.
"""
if not self._valid_hash(msg_hash):
raise ValueError("Hash does not belong to SHS")
if self._encoding == 'binary':
if len(signature) != (2 * self._order_bytes):
raise ValueError("The signature is not authentic (length)")
r_prime, s_prime = [Integer.from_bytes(x)
for x in (signature[:self._order_bytes],
signature[self._order_bytes:])]
else:
try:
der_seq = DerSequence().decode(signature)
except (ValueError, IndexError):
raise ValueError("The signature is not authentic (DER)")
if len(der_seq) != 2 or not der_seq.hasOnlyInts():
raise ValueError("The signature is not authentic (DER content)")
r_prime, s_prime = der_seq[0], der_seq[1]
if not (0 < r_prime < self._order) or not (0 < s_prime < self._order):
raise ValueError("The signature is not authentic (d)")
z = Integer.from_bytes(msg_hash.digest()[:self._order_bytes])
result = self._key._verify(z, (r_prime, s_prime))
if not result:
raise ValueError("The signature is not authentic")
# Make PyCryptodome code to fail
return False
class DeterministicDsaSigScheme(DssSigScheme):
# Also applicable to ECDSA
def __init__(self, key, encoding, order, private_key):
super(DeterministicDsaSigScheme, self).__init__(key, encoding, order)
self._private_key = private_key
def _bits2int(self, bstr):
"""See 2.3.2 in RFC6979"""
result = Integer.from_bytes(bstr)
q_len = self._order.size_in_bits()
b_len = len(bstr) * 8
if b_len > q_len:
result >>= (b_len - q_len)
return result
def _int2octets(self, int_mod_q):
"""See 2.3.3 in RFC6979"""
assert 0 < int_mod_q < self._order
return long_to_bytes(int_mod_q, self._order_bytes)
def _bits2octets(self, bstr):
"""See 2.3.4 in RFC6979"""
z1 = self._bits2int(bstr)
if z1 < self._order:
z2 = z1
else:
z2 = z1 - self._order
return self._int2octets(z2)
def _compute_nonce(self, mhash):
"""Generate k in a deterministic way"""
# See section 3.2 in RFC6979.txt
# Step a
h1 = mhash.digest()
# Step b
mask_v = bchr(1) * mhash.digest_size
# Step c
nonce_k = bchr(0) * mhash.digest_size
for int_oct in 0, 1:
# Step d/f
nonce_k = HMAC.new(nonce_k,
mask_v + bchr(int_oct) +
self._int2octets(self._private_key) +
self._bits2octets(h1), mhash).digest()
# Step e/g
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
nonce = -1
while not (0 < nonce < self._order):
# Step h.C (second part)
if nonce != -1:
nonce_k = HMAC.new(nonce_k, mask_v + bchr(0),
mhash).digest()
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
# Step h.A
mask_t = b("")
# Step h.B
while len(mask_t) < self._order_bytes:
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
mask_t += mask_v
# Step h.C (first part)
nonce = self._bits2int(mask_t)
return nonce
def _valid_hash(self, msg_hash):
return True
class FipsDsaSigScheme(DssSigScheme):
#: List of L (bit length of p) and N (bit length of q) combinations
#: that are allowed by FIPS 186-3. The security level is provided in
#: Table 2 of FIPS 800-57 (rev3).
_fips_186_3_L_N = (
(1024, 160), # 80 bits (SHA-1 or stronger)
(2048, 224), # 112 bits (SHA-224 or stronger)
(2048, 256), # 128 bits (SHA-256 or stronger)
(3072, 256) # 256 bits (SHA-512)
)
def __init__(self, key, encoding, order, randfunc):
super(FipsDsaSigScheme, self).__init__(key, encoding, order)
self._randfunc = randfunc
L = Integer(key.p).size_in_bits()
if (L, self._order_bits) not in self._fips_186_3_L_N:
error = ("L/N (%d, %d) is not compliant to FIPS 186-3"
% (L, self._order_bits))
raise ValueError(error)
def _compute_nonce(self, msg_hash):
# hash is not used
return Integer.random_range(min_inclusive=1,
max_exclusive=self._order,
randfunc=self._randfunc)
def _valid_hash(self, msg_hash):
"""Verify that SHA-1, SHA-2 or SHA-3 are used"""
return (msg_hash.oid == "1.3.14.3.2.26" or
msg_hash.oid.startswith("2.16.840.1.101.3.4.2."))
class FipsEcDsaSigScheme(DssSigScheme):
def __init__(self, key, encoding, order, randfunc):
super(FipsEcDsaSigScheme, self).__init__(key, encoding, order)
self._randfunc = randfunc
def _compute_nonce(self, msg_hash):
return Integer.random_range(min_inclusive=1,
max_exclusive=_curve.order,
randfunc=self._randfunc)
def _valid_hash(self, msg_hash):
"""Verify that SHA-[23] (256|384|512) bits are used to
match the 128-bit security of P-256"""
approved = ("2.16.840.1.101.3.4.2.1",
"2.16.840.1.101.3.4.2.2",
"2.16.840.1.101.3.4.2.3",
"2.16.840.1.101.3.4.2.8",
"2.16.840.1.101.3.4.2.9",
"2.16.840.1.101.3.4.2.10")
return msg_hash.oid in approved
def new(key, mode, encoding='binary', randfunc=None):
"""Return a signature scheme object `DSS_SigScheme` that
can be used to perform DSS signature or verification.
:Parameters:
key : a `Cryptodome.PublicKey.DSA` or `Cryptodome.PublicKey.ECC` key object
If the key has got its private half, both signature and
verification are possible.
If it only has the public half, verification is possible
but not signature generation.
For DSA keys, let *L* and *N* be the bit lengths of the modules *p*
and *q*: the combination *(L,N)* must appear in the following list,
in compliance to section 4.2 of `FIPS-186`__:
- (1024, 160)
- (2048, 224)
- (2048, 256)
- (3072, 256)
mode : string
The parameter can take these values:
- *'fips-186-3'*. The signature generation is carried out
according to `FIPS-186`__: the nonce *k* is taken from the RNG.
- *'deterministic-rfc6979'*. The signature generation
process does not rely on a random generator.
See RFC6979_.
encoding : string
How the signature is encoded. This value determines the output of
``sign`` and the input of ``verify``.
The following values are accepted:
- *'binary'* (default), the signature is the raw concatenation
of *r* and *s*. The size in bytes of the signature is always
two times the size of *q*.
- *'der'*, the signature is a DER encoded SEQUENCE with two
INTEGERs, *r* and *s*. The size of the signature is variable.
randfunc : callable
The source of randomness. If ``None``, the internal RNG is used.
Only used for the *'fips-186-3'* mode.
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. _RFC6979: http://tools.ietf.org/html/rfc6979
"""
# The goal of the 'mode' parameter is to avoid to
# have the current version of the standard as default.
#
# Over time, such version will be superseded by (for instance)
# FIPS 186-4 and it will be odd to have -3 as default.
if encoding not in ('binary', 'der'):
raise ValueError("Unknown encoding '%s'" % encoding)
if isinstance(key, EccKey):
order = _curve.order
private_key_attr = 'd'
else:
order = Integer(key.q)
private_key_attr = 'x'
if key.has_private():
private_key = getattr(key, private_key_attr)
else:
private_key = None
if mode == 'deterministic-rfc6979':
return DeterministicDsaSigScheme(key, encoding, order, private_key)
elif mode == 'fips-186-3':
if isinstance(key, EccKey):
return FipsEcDsaSigScheme(key, encoding, order, randfunc)
else:
return FipsDsaSigScheme(key, encoding, order, randfunc)
else:
raise ValueError("Unknown DSS mode '%s'" % mode)
|
py | 7dfba55eb9f9e9f4219137b13bc52b91cb6088b9 | from terrabot.util.streamer import Streamer
from terrabot.events.events import Events
class Packet83Parser(object):
def parse(self, world, player, data, ev_man):
pass
|
py | 7dfba6e3e4dc77c21026ca43bffea59b3c938a19 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import glob
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../.."))
sys.path.insert(0, os.path.abspath("../../nemo"))
sys.path.insert(0, os.path.abspath("../../nemo_text_processing"))
from package_info import __version__
autodoc_mock_imports = [
'torch',
'torch.nn',
'torch.utils',
'torch.optim',
'torch.utils.data',
'torch.utils.data.sampler',
'torchvision',
'torchvision.models',
'torchtext',
'ruamel.yaml', # ruamel.yaml has ., which is troublesome for this regex
'hydra', # hydra-core in requirements, hydra during import
'dateutil', # part of core python
'transformers.tokenization_bert', # has ., troublesome for this regex
'megatron', # megatron-lm in requirements, megatron in import
'sklearn',
'nemo_text_processing.inverse_text_normalization', # Not installed automatically
'nemo_text_processing.text_normalization', # Not installed automatically
'attr', # attrdict in requirements, attr in import
'torchmetrics', # inherited from PTL
]
_skipped_autodoc_mock_imports = ['wrapt', 'numpy']
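# Walk every requirements file and add each listed package to autodoc_mock_imports
# (unless it is explicitly skipped above) so autodoc can import the code base without
# its heavy dependencies being installed.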
for req_path in sorted(list(glob.glob("../../requirements/*.txt"))):
if "docs.txt" in req_path:
continue
req_file = os.path.abspath(os.path.expanduser(req_path))
with open(req_file, 'r') as f:
for line in f:
line = line.replace("\n", "")
req = re.search(r"([a-zA-Z0-9-_]*)", line)
if req:
req = req.group(1)
req = req.replace("-", "_")
if req not in autodoc_mock_imports:
if req in _skipped_autodoc_mock_imports:
print(f"Skipping req : `{req}` (lib {line})")
continue
autodoc_mock_imports.append(req)
print(f"Adding req : `{req}` to autodoc mock requirements (lib {line})")
else:
print(f"`{req}` already added to autodoc mock requirements (lib {line})")
#
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
"sphinxcontrib.bibtex",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.intersphinx",
"sphinx.ext.autosectionlabel",
"sphinxcontrib.bibtex",
"sphinx_rtd_theme",
]
bibtex_bibfiles = [
'asr/asr_all.bib',
'nlp/nlp_all.bib',
'tools/tools_all.bib',
'nemo_text_processing/textprocessing_all.bib',
'tts_all.bib',
]
intersphinx_mapping = {
'pytorch': ('https://pytorch.org/docs/stable', None),
'pytorch-lightning': ('https://pytorch-lightning.readthedocs.io/en/latest/', None),
}
# Set default flags for all classes.
autodoc_default_options = {'members': None, 'undoc-members': None, 'show-inheritance': True}
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "NVIDIA NeMo"
copyright = "2021-, NVIDIA CORPORATION"
author = "NVIDIA CORPORATION"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
# version = "0.10.0"
version = __version__
# The full version, including alpha/beta/rc tags.
# release = "0.9.0"
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
### Previous NeMo theme
# # NVIDIA theme settings.
# html_theme = 'nvidia_theme'
# html_theme_path = ["."]
# html_theme_options = {
# 'display_version': True,
# 'project_version': version,
# 'project_name': project,
# 'logo_path': None,
# 'logo_only': True,
# }
# html_title = 'Introduction'
# html_logo = html_theme_options["logo_path"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "nemodoc"
### from TLT conf.py
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
html_logo = os.path.join('nv_logo.png')
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'style_nav_header_background': '#000000',
# Toc options
'collapse_navigation': False,
'sticky_navigation': False,
# 'navigation_depth': 10,
'includehidden': False,
'titles_only': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_favicon = 'favicon.ico'
html_static_path = ['_static']
html_last_updated_fmt = ''
def setup(app):
app.add_css_file('css/custom.css')
app.add_js_file('js/pk_scripts.js')
# html_css_files = [
# './custom.css',
# ]
# html_js_files = [
# './pk_scripts.js',
# ]
|
py | 7dfba6f53e6cfbf70c89f77e221f0b550e27dd4d | # -*- coding: utf-8 -*-
import cv2
from math import *
import numpy as np
from PIL import Image
from torchvision import transforms
import torch
from . import crnn
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
def load_crnn_model(cpt_path, char_set_path):
char_set_lines = open(char_set_path, 'r', encoding='utf-8').readlines()
char_set = ''.join([ch.strip('\n') for ch in char_set_lines[1:]] + ['卍'])
n_class = len(char_set)
crnn_model = crnn.CRNN(32, 1, n_class, 256)
crnn_model.load_state_dict(torch.load(cpt_path, map_location=torch.device('cpu')))
return crnn_model, char_set
def sort_box(box):
box = sorted(box, key=lambda x: sum([x[1], x[3], x[5], x[7]]))
return box
def dumpRotateImage(img, degree, pt1, pt2, pt3, pt4):
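# Rotate the whole image by `degree` around its center on an enlarged canvas (so nothing
# is clipped), map pt1 and pt3 through the same affine matrix, and crop the axis-aligned
# region between them.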
height, width = img.shape[:2]
heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
matRotation = cv2.getRotationMatrix2D((width // 2, height // 2), degree, 1)
matRotation[0, 2] += (widthNew - width) // 2
matRotation[1, 2] += (heightNew - height) // 2
imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
pt1 = list(pt1)
pt3 = list(pt3)
[[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
[[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
ydim, xdim = imgRotation.shape[:2]
imgOut = imgRotation[max(1, int(pt1[1])): min(ydim - 1, int(pt3[1])),
max(1, int(pt1[0])): min(xdim - 1, int(pt3[0]))]
return imgOut
def decode(preds, char_set):
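# Greedy CTC decoding: index 0 is the blank label, so blanks are skipped and consecutive
# duplicates are collapsed; the network outputs are offset by 1 relative to char_set.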
pred_text = ''
for i in range(len(preds)):
if preds[i] != 0 and ((i == 0) or (i != 0 and preds[i] != preds[i - 1])):
pred_text += char_set[int(preds[i]) - 1]
return pred_text
def predict(img, model, char_set):
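# Resize the crop to the CRNN input height of 32 px while preserving aspect ratio,
# normalize to [-1, 1], run the network, take the per-timestep argmax and CTC-decode it.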
(w, h) = img.size
size_h = 32
ratio = size_h / float(h)
size_w = int(w * ratio)
transform = resizeNormalize((size_w, size_h))
image = transform(img)
image = image.unsqueeze(0)
model.eval()
preds = model(image)
preds = preds.max(2)[1]
preds = preds.squeeze()
pred_text = decode(preds, char_set)
return pred_text
def recognize_char(img, text_recs, model, char_set, adjust=False):
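# text_recs holds 8-value quadrilaterals (x1, y1, ..., x4, y4) sorted top-to-bottom;
# each box is optionally padded (adjust=True), deskewed, cropped and passed to the CRNN.
# Returns a list of (bounding box, recognized text) pairs.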
text_recs = sort_box(text_recs)
results = []
xDim, yDim = img.shape[1], img.shape[0]
for rec in text_recs:
xlength = int((rec[6] - rec[0]) * 0.1)
ylength = int((rec[7] - rec[1]) * 0.2)
if adjust:
pt1 = (max(1, rec[0] - xlength), max(1, rec[1] - ylength))
pt2 = (rec[2], rec[3])
pt3 = (min(rec[6] + xlength, xDim - 2), min(yDim - 2, rec[7] + ylength))
pt4 = (rec[4], rec[5])
else:
pt1 = (max(1, rec[0]), max(1, rec[1]))
pt2 = (rec[2], rec[3])
pt3 = (min(rec[6], xDim - 2), min(yDim - 2, rec[7]))
pt4 = (rec[4], rec[5])
degree = degrees(atan2(pt2[1] - pt1[1], pt2[0] - pt1[0])) # tilt angle of the text region
partImg = dumpRotateImage(img, degree, pt1, pt2, pt3, pt4)
if partImg.shape[0] < 1 or partImg.shape[1] < 1 or partImg.shape[0] > partImg.shape[1]: # skip abnormal crops
continue
image = Image.fromarray(partImg).convert('L')
text = predict(image, model, char_set)
if len(text) > 0:
results.append(((rec[0], rec[1], rec[6], rec[7]), text))
return results
|
py | 7dfba80eb8010df91acdc520f5e4411735783909 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Find a chain of leaks given some starting address.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from queue import *
import gdb
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.color.message as message
import pwndbg.color.theme as theme
import pwndbg.commands
import pwndbg.vmmap
from pwndbg.chain import config_arrow_right
# Used to recursively print the pointer chain.
# addr is a pointer. It is taken to be a child pointer.
# visited_map is a map of children -> (parent,parent_start)
def get_rec_addr_string(addr,visited_map):
page = pwndbg.vmmap.find(addr)
arrow_right = C.arrow(' %s ' % config_arrow_right)
if page is not None:
if addr not in visited_map:
return ""
parent_info = visited_map[addr]
parent = parent_info[0]
parent_base_addr = parent_info[1]
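# Render the parent pointer as <parent_start>±<offset> so the printed chain shows
# where inside the parent allocation the pointer was found.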
if parent - parent_base_addr < 0:
curText = hex(parent_base_addr) + hex(parent - parent_base_addr)
else:
curText = hex(parent_base_addr) + "+" + hex(parent - parent_base_addr)
if parent_base_addr == addr:
return ""
return get_rec_addr_string(parent_base_addr,visited_map) + M.get(parent_base_addr,text=curText) + arrow_right
else:
return ""
# Useful for debugging. Prints a map of child -> (parent, parent_start)
def dbg_print_map(maps):
for child, parent_info in maps.items():
print("0x%x + (0x%x, 0x%x)" % (child, parent_info[0], parent_info[1]))
parser = argparse.ArgumentParser()
parser.description = """
Attempt to find a leak chain given a starting address.
Scans memory near the given address, looks for pointers, and continues that process to attempt to find leaks.
Example: leakfind $rsp --page_name=filename --max_offset=0x48 --max_depth=6. This would look for any chains of leaks \
that point to a section in filename which begin near $rsp, are never 0x48 bytes further from a known pointer, \
and are a maximum length of 6.
"""
parser.formatter_class=argparse.RawDescriptionHelpFormatter
parser.add_argument("address",help="Starting address to find a leak chain from")
parser.add_argument("-p", "--page_name", type=str, nargs="?", default=None, help="Substring required to be part of the name of any found pages")
parser.add_argument("-o", "--max_offset", default=0x48, nargs="?", help="Max offset to add to addresses when looking for leak")
parser.add_argument("-d", "--max_depth", default=0x4, nargs="?", help="Maximum depth to follow pointers to")
parser.add_argument("-s", "--step", nargs="?", default=0x1, help="Step to add between pointers so they are considered. For example, if this is 4 it would only consider pointers at an offset divisible by 4 from the starting pointer")
parser.add_argument('--negative_offset',nargs="?", default=0x0, help="Max negative offset to search before an address when looking for a leak")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def leakfind(address=None, page_name=None, max_offset=0x40, max_depth=0x4, step=0x1, negative_offset=0x0):
if address is None:
raise argparse.ArgumentTypeError('No starting address provided.')
foundPages = pwndbg.vmmap.find(address)
if not foundPages:
raise argparse.ArgumentTypeError('Starting address is not mapped.')
if not pwndbg.memory.peek(address):
raise argparse.ArgumentTypeError('Unable to read from starting address.')
max_depth = int(max_depth)
# Just warn the user that a large depth might be slow.
# Probably worth checking offset^depth < threshold. Do this when more benchmarking is established.
if max_depth > 8:
print(message.warn("leakfind may take a while to run on larger depths."))
stride = int(step)
address = int(address)
max_offset = int(max_offset)
negative_offset = int(negative_offset)
# The below map stores a map of child address->(parent_address,parent_start_address)
# In the above tuple, parent_address is the exact address holding a pointer to the child address.
# parent_start_address is an address that a previous address pointed to.
# We need to store both so that we can nicely create our leak chain.
visited_map = {}
visited_set = {int(address)}
address_queue = Queue()
address_queue.put(int(address))
depth = 0
time_to_depth_increase = 0
# Run a bfs
# TODO look into performance gain from checking if an address is mapped before calling pwndbg.memory.pvoid()
# TODO also check using pwndbg.memory.read for possible performance boosts.
while address_queue.qsize() > 0 and depth < max_depth:
if time_to_depth_increase == 0:
depth = depth + 1
time_to_depth_increase = address_queue.qsize()
cur_start_addr = address_queue.get()
time_to_depth_increase -= 1
for cur_addr in range(cur_start_addr - negative_offset, cur_start_addr + max_offset, stride):
try:
cur_addr &= pwndbg.arch.ptrmask
result = int(pwndbg.memory.pvoid(cur_addr))
if result in visited_map or result in visited_set:
continue
visited_map[result] = (cur_addr, cur_start_addr) # map is of form child->(parent,parent_start)
address_queue.put(result)
visited_set.add(result)
except gdb.error:
# That means the memory was unmapped. Just skip it if we can't read it.
break
# A map of length->list of lines. Used to let us print in a somewhat nice manner.
output_map = {}
arrow_right = C.arrow(' %s ' % config_arrow_right)
for child in visited_map:
child_page = pwndbg.vmmap.find(child)
if child_page is not None:
if page_name is not None and page_name not in child_page.objfile:
continue
line = get_rec_addr_string(child,visited_map) + M.get(child) + " " + M.get(child,text=child_page.objfile)
chain_length = line.count(arrow_right)
if chain_length in output_map:
output_map[chain_length].append(line)
else:
output_map[chain_length] = [line]
# Output sorted by length of chain
for chain_length in output_map:
for line in output_map[chain_length]:
print(line)
if pwndbg.qemu.is_qemu():
print("\n[QEMU target detected - leakfind result might not be accurate; see `help vmmap`]")
|
py | 7dfba87b9ca89ed158a002e92a3a4e5c805408cd | # Programming I R
# The matplotlib package
# Saving a plot to a file - example 1: basics.
import matplotlib.pyplot as plt
# Define the points of the plot by giving the corresponding
# x and y coordinate values.
x = [1, 2, 3, 4, 5]
y = [4, 2, 1, 5, 3]
# Draw a polyline connecting these points.
plt.plot(x, y)
# Label the axes.
plt.xlabel("Odcięta")
plt.ylabel("Rzędna")
# Give the plot a title.
plt.title("Łamana")
# Save the plot to a PNG file (raster graphics).
plt.savefig("lamana.png",
dpi = 300, # Resolution (optional parameter).
transparent = True # Background transparency (optional parameter).
)
# Save the plot to a JPG file (raster graphics).
plt.savefig("lamana.jpg",
dpi = 300 # Resolution (optional parameter).
)
# You can experiment with the advanced options (quality, optimize, progressive),
# which reduce the file size while keeping comparable quality.
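# A minimal sketch of those options, assuming a matplotlib/Pillow combination that
# forwards JPEG options through pil_kwargs (older matplotlib accepted them directly):
# plt.savefig("lamana_small.jpg", dpi=150, pil_kwargs={"quality": 80, "optimize": True, "progressive": True})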
# Save the plot to an SVG file (vector graphics).
plt.savefig("lamana.svg")
# Save the plot to a PDF file (vector graphics).
plt.savefig("lamana.pdf") |
py | 7dfba95825775eeca3d2af0388dd5a9b89d6bb1e | from peewee import *
from model.abstract_entity import AbstractEntity
class User(AbstractEntity):
id = PrimaryKeyField()
name = CharField(unique=True)
last_updated = DateTimeField(null=True)
|
py | 7dfba99f0256e700939646135335b6ff5e41004a | # Main script for Breadth-First Search
from BuscaLargura import *
print('BUSCA EM LARGURA')
executaBFS()
|
py | 7dfbab8c2329bd0df2e3f329ccaf8d3f0c9ed0f8 | import numpy as np
import cv2
import tensorflow as tf
from weapons.CTC_0a import ctc_recog_model
def sparseTuples2dense(sparseTensor):
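# sparseTensor is an (indices, values, dense_shape) triple as returned by TF sparse ops;
# build a dense array prefilled with -1 and scatter the values into it.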
pred_dense = -np.ones(sparseTensor[2])
for i in range(len(sparseTensor[0])):
pred_dense[sparseTensor[0][i][0],sparseTensor[0][i][1]] = sparseTensor[1][i]
return pred_dense
class recognizer:
def __init__(self, model_path):
tf.reset_default_graph()
provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏",
"浙", "京", "闽", "赣", "鲁","豫", "鄂", "湘", "粤", "桂", "琼","川",
"贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "_"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '_']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9', '_']
self.labels_list = []
for item in provinces+ads+alphabets:
if item != "_" and item not in self.labels_list:
self.labels_list.append(item)
self.labels_list.append("_")
self.model = ctc_recog_model(len(self.labels_list)+2)
self.sess = tf.Session()
saver = tf.train.Saver()
saver.restore(self.sess, model_path)
def predict(self, imgs):
"""
Input images must be RGB; pixel values may be in [0, 255] (they are scaled down) or already normalized.
"""
x_shape = (300,150)
xs = []
for img in imgs:
if np.max(img)>1:
x = cv2.resize(img/255.1, x_shape)
else:
x = cv2.resize(img.astype(float), x_shape)  # already-normalized input
xs.append(x)
prediction = self.model.predict(self.sess, np.transpose(xs, axes = [0,2,1,3]),)
prediction = sparseTuples2dense(prediction[0]).astype(int)
results = []
for p in prediction:
results.append(''.join([self.labels_list[x] for x in p if x>=0 and x<len(self.labels_list)]))
return results |
py | 7dfbabb309aa507b8496b1979b23746581a80915 | # Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the project.py module."""
import contextlib
import os
import shutil
import subprocess
import tempfile
import unittest
import error
import git_command
import git_config
import platform_utils
import project
@contextlib.contextmanager
def TempGitTree():
"""Create a new empty git checkout for testing."""
# TODO(vapier): Convert this to tempfile.TemporaryDirectory once we drop
# Python 2 support entirely.
try:
tempdir = tempfile.mkdtemp(prefix='repo-tests')
# Tests need to assume, that main is default branch at init,
# which is not supported in config until 2.28.
cmd = ['git', 'init']
if git_command.git_require((2, 28, 0)):
cmd += ['--initial-branch=main']
else:
# Use template dir for init.
templatedir = tempfile.mkdtemp(prefix='.test-template')
with open(os.path.join(templatedir, 'HEAD'), 'w') as fp:
fp.write('ref: refs/heads/main\n')
cmd += ['--template=', templatedir]
subprocess.check_call(cmd, cwd=tempdir)
yield tempdir
finally:
platform_utils.rmtree(tempdir)
class FakeProject(object):
"""A fake for Project for basic functionality."""
def __init__(self, worktree):
self.worktree = worktree
self.gitdir = os.path.join(worktree, '.git')
self.name = 'fakeproject'
self.work_git = project.Project._GitGetByExec(
self, bare=False, gitdir=self.gitdir)
self.bare_git = project.Project._GitGetByExec(
self, bare=True, gitdir=self.gitdir)
self.config = git_config.GitConfig.ForRepository(gitdir=self.gitdir)
class ReviewableBranchTests(unittest.TestCase):
"""Check ReviewableBranch behavior."""
def test_smoke(self):
"""A quick run through everything."""
with TempGitTree() as tempdir:
fakeproj = FakeProject(tempdir)
# Generate some commits.
with open(os.path.join(tempdir, 'readme'), 'w') as fp:
fp.write('txt')
fakeproj.work_git.add('readme')
fakeproj.work_git.commit('-mAdd file')
fakeproj.work_git.checkout('-b', 'work')
fakeproj.work_git.rm('-f', 'readme')
fakeproj.work_git.commit('-mDel file')
# Start off with the normal details.
rb = project.ReviewableBranch(
fakeproj, fakeproj.config.GetBranch('work'), 'main')
self.assertEqual('work', rb.name)
self.assertEqual(1, len(rb.commits))
self.assertIn('Del file', rb.commits[0])
d = rb.unabbrev_commits
self.assertEqual(1, len(d))
short, long = next(iter(d.items()))
self.assertTrue(long.startswith(short))
self.assertTrue(rb.base_exists)
# Hard to assert anything useful about this.
self.assertTrue(rb.date)
# Now delete the tracking branch!
fakeproj.work_git.branch('-D', 'main')
rb = project.ReviewableBranch(
fakeproj, fakeproj.config.GetBranch('work'), 'main')
self.assertEqual(0, len(rb.commits))
self.assertFalse(rb.base_exists)
# Hard to assert anything useful about this.
self.assertTrue(rb.date)
class CopyLinkTestCase(unittest.TestCase):
"""TestCase for stub repo client checkouts.
It'll have a layout like:
tempdir/ # self.tempdir
checkout/ # self.topdir
git-project/ # self.worktree
Attributes:
tempdir: A dedicated temporary directory.
worktree: The top of the repo client checkout.
topdir: The top of a project checkout.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp(prefix='repo_tests')
self.topdir = os.path.join(self.tempdir, 'checkout')
self.worktree = os.path.join(self.topdir, 'git-project')
os.makedirs(self.topdir)
os.makedirs(self.worktree)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@staticmethod
def touch(path):
with open(path, 'w'):
pass
def assertExists(self, path, msg=None):
"""Make sure |path| exists."""
if os.path.exists(path):
return
if msg is None:
msg = ['path is missing: %s' % path]
while path != '/':
path = os.path.dirname(path)
if not path:
# If we're given something like "foo", abort once we get to "".
break
result = os.path.exists(path)
msg.append('\tos.path.exists(%s): %s' % (path, result))
if result:
msg.append('\tcontents: %r' % os.listdir(path))
break
msg = '\n'.join(msg)
raise self.failureException(msg)
class CopyFile(CopyLinkTestCase):
"""Check _CopyFile handling."""
def CopyFile(self, src, dest):
return project._CopyFile(self.worktree, src, self.topdir, dest)
def test_basic(self):
"""Basic test of copying a file from a project to the toplevel."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
cf = self.CopyFile('foo.txt', 'foo')
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'foo'))
def test_src_subdir(self):
"""Copy a file from a subdir of a project."""
src = os.path.join(self.worktree, 'bar', 'foo.txt')
os.makedirs(os.path.dirname(src))
self.touch(src)
cf = self.CopyFile('bar/foo.txt', 'new.txt')
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'new.txt'))
def test_dest_subdir(self):
"""Copy a file to a subdir of a checkout."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
cf = self.CopyFile('foo.txt', 'sub/dir/new.txt')
self.assertFalse(os.path.exists(os.path.join(self.topdir, 'sub')))
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'sub', 'dir', 'new.txt'))
def test_update(self):
"""Make sure changed files get copied again."""
src = os.path.join(self.worktree, 'foo.txt')
dest = os.path.join(self.topdir, 'bar')
with open(src, 'w') as f:
f.write('1st')
cf = self.CopyFile('foo.txt', 'bar')
cf._Copy()
self.assertExists(dest)
with open(dest) as f:
self.assertEqual(f.read(), '1st')
with open(src, 'w') as f:
f.write('2nd!')
cf._Copy()
with open(dest) as f:
self.assertEqual(f.read(), '2nd!')
def test_src_block_symlink(self):
"""Do not allow reading from a symlinked path."""
src = os.path.join(self.worktree, 'foo.txt')
sym = os.path.join(self.worktree, 'sym')
self.touch(src)
platform_utils.symlink('foo.txt', sym)
self.assertExists(sym)
cf = self.CopyFile('sym', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_symlink_traversal(self):
"""Do not allow reading through a symlink dir."""
realfile = os.path.join(self.tempdir, 'file.txt')
self.touch(realfile)
src = os.path.join(self.worktree, 'bar', 'file.txt')
platform_utils.symlink(self.tempdir, os.path.join(self.worktree, 'bar'))
self.assertExists(src)
cf = self.CopyFile('bar/file.txt', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_copy_from_dir(self):
"""Do not allow copying from a directory."""
src = os.path.join(self.worktree, 'dir')
os.makedirs(src)
cf = self.CopyFile('dir', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_dest_block_symlink(self):
"""Do not allow writing to a symlink."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
platform_utils.symlink('dest', os.path.join(self.topdir, 'sym'))
cf = self.CopyFile('foo.txt', 'sym')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_dest_block_symlink_traversal(self):
"""Do not allow writing through a symlink dir."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
platform_utils.symlink(tempfile.gettempdir(),
os.path.join(self.topdir, 'sym'))
cf = self.CopyFile('foo.txt', 'sym/foo.txt')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_copy_to_dir(self):
"""Do not allow copying to a directory."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
os.makedirs(os.path.join(self.topdir, 'dir'))
cf = self.CopyFile('foo.txt', 'dir')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
class LinkFile(CopyLinkTestCase):
"""Check _LinkFile handling."""
def LinkFile(self, src, dest):
return project._LinkFile(self.worktree, src, self.topdir, dest)
def test_basic(self):
"""Basic test of linking a file from a project into the toplevel."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'foo')
lf._Link()
dest = os.path.join(self.topdir, 'foo')
self.assertExists(dest)
self.assertTrue(os.path.islink(dest))
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
def test_src_subdir(self):
"""Link to a file in a subdir of a project."""
src = os.path.join(self.worktree, 'bar', 'foo.txt')
os.makedirs(os.path.dirname(src))
self.touch(src)
lf = self.LinkFile('bar/foo.txt', 'foo')
lf._Link()
self.assertExists(os.path.join(self.topdir, 'foo'))
def test_src_self(self):
"""Link to the project itself."""
dest = os.path.join(self.topdir, 'foo', 'bar')
lf = self.LinkFile('.', 'foo/bar')
lf._Link()
self.assertExists(dest)
self.assertEqual(os.path.join('..', 'git-project'), os.readlink(dest))
def test_dest_subdir(self):
"""Link a file to a subdir of a checkout."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'sub/dir/foo/bar')
self.assertFalse(os.path.exists(os.path.join(self.topdir, 'sub')))
lf._Link()
self.assertExists(os.path.join(self.topdir, 'sub', 'dir', 'foo', 'bar'))
def test_src_block_relative(self):
"""Do not allow relative symlinks."""
BAD_SOURCES = (
'./',
'..',
'../',
'foo/.',
'foo/./bar',
'foo/..',
'foo/../foo',
)
for src in BAD_SOURCES:
lf = self.LinkFile(src, 'foo')
self.assertRaises(error.ManifestInvalidPathError, lf._Link)
def test_update(self):
"""Make sure changed targets get updated."""
dest = os.path.join(self.topdir, 'sym')
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'sym')
lf._Link()
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
# Point the symlink somewhere else.
os.unlink(dest)
platform_utils.symlink(self.tempdir, dest)
lf._Link()
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
|
py | 7dfbac62d22b7fe51c1d846ee8b98f6ecd1415f2 | from .tts import SileroTTSPlugin, SileroTTSValidator
|
py | 7dfbaca8e4377017cadd76ac4d384ec50195d5ef | """
@author: Ahmed Allam <[email protected]>
"""
import os
from datetime import datetime
import numpy
from .utilities import (
ReaderWriter,
create_directory,
generate_datetime_str,
vectorized_logsumexp,
)
class Learner(object):
"""learner used for training CRF models supporting search- and gradient-based learning methods
Args:
crf_model: an instance of CRF models such as :class:`HOCRFAD`
Attributes:
crf_model: an instance of CRF models such as :class:`HOCRFAD`
training_description: dictionary that will include the training specification
of the model
"""
def __init__(self, crf_model):
self.crf_model = crf_model
self.training_description = None
def train_model(
self, w0, seqs_id, optimization_options, working_dir, save_model=True
):
r"""the **MAIN** method for training models using the various options available
Args:
w0: numpy vector representing initial weights for the parameters
seqs_id: list of integers representing the sequence ids
optimization_options: dictionary specifying the training method
working_dir: string representing the directory where the model data
and generated files will be saved
Keyword Arguments:
save_model: boolean specifying if to save the final model
Example:
The available options for training are:
- `SGA` for stochastic gradient ascent
- `SGA-ADADELTA` for stochastic gradient ascent using ADADELTA approach
- `BFGS` or `L-BFGS-B` for optimization using second order information (hessian matrix)
- `SVRG` for stochastic variance reduced gradient method
- `COLLINS-PERCEPTRON` for structured perceptron
- `SAPO` for Search-based Probabilistic Online Learning Algorithm (SAPO) (an adapted version)
For example possible specification of the optimization options are:
::
1) {'method': 'SGA-ADADELTA'
'regularization_type': {'l1', 'l2'}
'regularization_value': float
'num_epochs': integer
'tolerance': float
'p_rho': float
'epsilon': float
}
2) {'method': 'SGA' or 'SVRG'
'regularization_type': {'l1', 'l2'}
'regularization_value': float
'num_epochs': integer
'tolerance': float
'learning_rate_schedule': one of ("bottu", "exponential_decay", "t_inverse", "constant")
't0': float
'a': float
}
3) {'method': 'L-BFGS-B' or 'BFGS'
'regularization_type': 'l2'
'regularization_value': float
'disp': False
'maxls': 20,
'iprint': -1,
'gtol': 1e-05,
'eps': 1e-08,
'maxiter': 15000,
'ftol': 2.220446049250313e-09,
'maxcor': 10,
'maxfun': 15000
}
4) {'method': 'COLLINS-PERCEPTRON'
'regularization_type': {'l1', 'l2'}
'regularization_value': float
'num_epochs': integer
'update_type':{'early', 'max-fast', 'max-exhaustive', 'latest'}
'shuffle_seq': boolean
'beam_size': integer
'avg_scheme': {'avg_error', 'avg_uniform'}
'tolerance': float
}
5) {'method': 'SAPO'
'regularization_type': {'l2'}
'regularization_value': float
'num_epochs': integer
'update_type':'early'
'shuffle_seq': boolean
'beam_size': integer
'topK': integer
'tolerance': float
}
"""
pop_keys = set()
lambda_type = optimization_options.get("regularization_type")
pop_keys.add("regularization_type")
if lambda_type not in {"l1", "l2"}:
# default regularization type is l2
lambda_type = "l2"
# ^print("regularization by default is l2")
# get the regularization parameter value
lambda_val = optimization_options.get("regularization_value")
pop_keys.add("regularization_value")
if lambda_val == None:
# assign default lambda value
lambda_val = 0.0
elif lambda_val < 0:
# regularization should be positive
lambda_val = 0.0
# initialization of the weight vector w
# w0 = numpy.zeros(len(self.weights))
method = optimization_options.get("method")
pop_keys.add("method")
if method not in {
"L-BFGS-B",
"BFGS",
"SGA",
"SGA-ADADELTA",
"SVRG",
"COLLINS-PERCEPTRON",
"SAPO",
}:
# default weight learning/optimization method
method = "SGA-ADADELTA"
if method in {"L-BFGS-B", "BFGS"}:
# initialize the new optimization options
option_keys = set(optimization_options.keys()) - pop_keys
options = {elmkey: optimization_options[elmkey] for elmkey in option_keys}
optimization_config = {
"method": method,
"regularization_value": lambda_val,
"regularization_type": "l2",
"options": options,
}
estimate_weights = self._optimize_scipy
elif method in {"SGA", "SGA-ADADELTA", "SVRG", "COLLINS-PERCEPTRON", "SAPO"}:
num_epochs = optimization_options.get("num_epochs")
if type(num_epochs) != int:
# default number of epochs if not specified
num_epochs = 3
elif num_epochs < 0:
# num_epochs should be positive
num_epochs = 3
tolerance = optimization_options.get("tolerance")
if tolerance == None:
# default value of tolerance if not specified
tolerance = 1e-8
elif tolerance < 0:
tolerance = 1e-8
optimization_config = {
"method": method,
"regularization_type": lambda_type,
"regularization_value": lambda_val,
"num_epochs": num_epochs,
"tolerance": tolerance,
}
if method in {"COLLINS-PERCEPTRON", "SAPO"}:
# if segmentation problem the non-entity symbol is specified using this option else it is None
seg_other_symbol = optimization_options.get("seg_other_symbol")
optimization_config["seg_other_symbol"] = seg_other_symbol
# setting beam size
beam_size = optimization_options.get("beam_size")
# default beam size
default_beam = len(self.crf_model.model.Y_codebook)
if type(beam_size) != int:
beam_size = default_beam
elif beam_size <= 0 or beam_size > default_beam:
beam_size = default_beam
optimization_config["beam_size"] = beam_size
self.crf_model.beam_size = beam_size
# setting update type
update_type = optimization_options.get("update_type")
if update_type not in {"early", "latest", "max-exhaustive", "max-fast"}:
update_type = "early"
optimization_config["update_type"] = update_type
# setting shuffle_seq
shuffle_seq = optimization_options.get("shuffle_seq")
if type(shuffle_seq) != bool:
shuffle_seq = False
optimization_config["shuffle_seq"] = shuffle_seq
if method == "COLLINS-PERCEPTRON":
# getting averaging scheme
avg_scheme = optimization_options.get("avg_scheme")
if avg_scheme not in ("avg_uniform", "avg_error", "survival"):
avg_scheme = "avg_error"
optimization_config["avg_scheme"] = avg_scheme
estimate_weights = self._structured_perceptron
else:
# getting gamma (i.e. learning rate)
gamma = optimization_options.get("gamma")
if gamma == None:
# use default value
gamma = 1
elif gamma < 0:
gamma = 1
optimization_config["gamma"] = gamma
# getting topK (i.e. top-K decoded sequences)
topK = optimization_options.get("topK")
if topK == None:
# use default value
topK = 5
elif topK < 0:
topK = 5
optimization_config["topK"] = topK
estimate_weights = self._sapo
elif method in {"SGA", "SVRG"}:
# get the other parameters to be tuned such as t0 and alpha
learning_rate_schedule = optimization_options.get(
"learning_rate_schedule"
)
if learning_rate_schedule not in {
"bottu",
"exponential_decay",
"t_inverse",
"constant",
}:
# default learning rate schedule
learning_rate_schedule = "t_inverse"
optimization_config["learning_rate_schedule"] = learning_rate_schedule
t0 = optimization_options.get("t0")
if t0 == None:
# use default value
t0 = 0.1
elif t0 < 0:
t0 = 0.1
optimization_config["t0"] = t0
if learning_rate_schedule in {"t_inverse", "exponential_decay"}:
# get the alpha parameter
a = optimization_options.get("a")
if a == None:
# use a default value
a = 0.9
elif a <= 0 or a >= 1:
a = 0.9
optimization_config["a"] = a
if method == "SGA":
estimate_weights = self._sga_classic
else:
estimate_weights = self._sga_svrg
elif method == "SGA-ADADELTA":
estimate_weights = self._sga_adadelta
p_rho = optimization_options.get("p_rho")
if p_rho == None:
# default value
p_rho = 0.95
elif p_rho < 0:
# num_epochs should be positive
p_rho = 0.95
epsilon = optimization_options.get("epsilon")
if epsilon == None:
# default value of tolerance if not specified
epsilon = 1e-6
elif epsilon < 0:
epsilon = 1e-6
optimization_config["p_rho"] = p_rho
optimization_config["epsilon"] = epsilon
# save the training options
self.training_description = optimization_config
model_foldername = generate_datetime_str()
model_dir = create_directory(
model_foldername, create_directory("models", working_dir)
)
model_name = model_foldername + ".model"
self.training_description["model_dir"] = model_dir
self.training_description["model_name"] = model_name
self.training_description["train_seqs_id"] = seqs_id
# if everything is defined correctly then estimate the parameters
w_hat = estimate_weights(w0, seqs_id)
# update model weights to w_hat
self.crf_model.weights = w_hat
if save_model:
# pickle the model
modelparts_dir = create_directory("model_parts", model_dir)
self.crf_model.save_model(modelparts_dir)
# cleanup the instance variables
self.cleanup()
def _report_training(self):
"""report training by logging the description to a file"""
method = self.training_description["method"]
regularization_type = self.training_description["regularization_type"]
# regularization parameter lambda
C = self.training_description["regularization_value"]
model_dir = self.training_description["model_dir"]
model_name = self.training_description["model_name"]
# log file
log_file = os.path.join(model_dir, "crf_training_log.txt")
line = "---Model training-- starting time {} \n".format(datetime.now())
line += "model name: {} \n".format(model_name)
line += "model directory: {} \n".format(model_dir)
line += "model type: {} \n".format(self.crf_model.__class__)
line += "training method: {} \n".format(method)
if C:
line += "type of regularization: {} \n".format(regularization_type)
line += "value of regularization: {} \n".format(C)
if method == "SGA":
learning_rate_schedule = self.training_description["learning_rate_schedule"]
t0 = self.training_description["t0"]
line += "learning rate schedule: {} \n".format(learning_rate_schedule)
line += "eta0: {} \n".format(t0)
if learning_rate_schedule in ("t_inverse", "exponential_decay"):
# get the alpha parameter
a = self.training_description["a"]
line += "a: {} \n".format(a)
elif method == "SGA-ADADELTA":
rho = self.training_description["p_rho"]
epsilon = self.training_description["epsilon"]
line += "p_rho: {} \n".format(rho)
line += "epsilon: {} \n".format(epsilon)
elif method in {"SAPO", "COLLINS-PERCEPTRON"}:
update_type = self.training_description["update_type"]
beam_size = self.training_description["beam_size"]
shuffle_seq = self.training_description["shuffle_seq"]
line += "update_type: {} \n".format(update_type)
line += "beam_size: {} \n".format(beam_size)
line += "shuffle_seq: {} \n".format(shuffle_seq)
if method == "COLLINS-PERCEPTRON":
avg_scheme = self.training_description["avg_scheme"]
line += "averaging scheme: {} \n".format(avg_scheme)
else:
gamma = self.training_description["gamma"]
topK = self.training_description["topK"]
line += "gamma (learning rate): {} \n".format(gamma)
line += "topK (number of top decoded seqs): {} \n".format(topK)
if method not in ("L-BFGS-B", "BFGS"):
line += "number of epochs: {} \n".format(
self.training_description["num_epochs"]
)
# write to file
ReaderWriter.log_progress(line, log_file)
def _check_reldiff(self, x, y):
"""calculate relative difference between two numbers
Args:
x: float
y: float
"""
tolerance = self.training_description["tolerance"]
if numpy.abs(y) <= tolerance:
self._exitloop = True
else:
if x != y:
reldiff = numpy.abs(x - y) / (numpy.abs(x) + numpy.abs(y))
# print("reldiff = {}".format(reldiff))
if reldiff <= tolerance:
self._exitloop = True
else:
self._exitloop = False
def _optscipy_seqs_loglikelihood(self, w, seqs_id):
"""compute seqs loglikelihood when using the BFGS and L-BFGS-B optimization options
Args:
w: weight vector (numpy vector)
seqs_id: list of integers representing ids assigned to the sequence
"""
crf_model = self.crf_model
seqs_loglikelihood = crf_model.compute_seqs_loglikelihood(w, seqs_id)
# clear cached info
crf_model.clear_cached_info(seqs_id)
# check for regularization parameter
l2 = self.training_description["regularization_value"]
if l2 > 0:
# log(p(Y|X;w)) - lambda/2 * ||w||**2
seqs_loglikelihood = seqs_loglikelihood - ((l2 / 2) * numpy.dot(w, w))
# since the optimization will be based on minimization, hence we multiply by -1
seqs_loglikelihood = seqs_loglikelihood * -1
return seqs_loglikelihood
def _optscipy_seqs_gradient(self, w, seqs_id):
"""compute seqs gradient when using the BFGS and L-BFGS-B optimization options
Args:
w: weight vector (numpy vector)
seqs_id: list of integers representing ids assigned to the sequence
"""
crf_model = self.crf_model
seqs_grad = crf_model.compute_seqs_gradient(w, seqs_id)
# clear cached info
crf_model.clear_cached_info(seqs_id)
l2 = self.training_description["regularization_value"]
if l2 > 0:
seqs_grad = seqs_grad - (l2 * w)
# since the optimization will be based on minimization, hence we multiply by -1
seqs_grad = seqs_grad * -1
return seqs_grad
def _optimize_scipy(self, w, train_seqs_id):
"""estimate the parameters w of the model using `scipy optimize function`
it uses `optimize.minimize()` function from the scipy package
Args:
w: weight vector (numpy vector)
train_seqs_id: list of integers representing ids of the training sequences
"""
from scipy import optimize
self._report_training()
objfunc = self._optscipy_seqs_loglikelihood
gradfunc = self._optscipy_seqs_gradient
method = self.training_description["method"]
options = self.training_description["options"]
# to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._iter_count = 0
result = optimize.minimize(
fun=objfunc,
x0=w,
args=(train_seqs_id),
method=method,
jac=gradfunc,
options=options,
callback=self._track_scipy_optimizer,
)
model_dir = self.training_description["model_dir"]
# log file
log_file = os.path.join(model_dir, "crf_training_log.txt")
line = "---Model training--- end time {} \n".format(datetime.now())
line += "\n \n"
ReaderWriter.log_progress(line, log_file)
# print("results \n {}".format(result))
print("success: ", result["success"])
# print(result.keys())
# estimated optimal weights
w_hat = result.x
return w_hat
def _track_scipy_optimizer(self, w):
"""track scipy optimization by logging each iteration
Args:
w: weight vector (numpy vector)
"""
# increment iteration count
self._iter_count += 1
delta_time = datetime.now() - self._elapsed_time
crf_model = self.crf_model
# approximate estimation of sum of loglikelihood -- using previous weights
train_seqs_id = self.training_description["train_seqs_id"]
seqs_loglikelihood = 0
for seq_id in train_seqs_id:
seq_loglikelihood = crf_model.seqs_info[seq_id]["loglikelihood"]
seqs_loglikelihood += seq_loglikelihood
seqs_loglikelihood *= -1
""" use the below command >> to compute the sum of sequences' loglikelihood using the updated/current weights
the sum should be decreasing after each iteration for successful training (used as diagnostics)
however it is expensive/costly to recompute
>>> seqs_loglikelihood = crf_model.compute_seqs_loglikelihood(w, train_seqs_id)
"""
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
line = "--- Iteration {} --- \n".format(self._iter_count)
line += "Estimated average negative loglikelihood is {} \n".format(
seqs_loglikelihood
)
line += "Number of seconds spent: {} \n".format(delta_time.total_seconds())
ReaderWriter.log_progress(line, log_file)
self._elapsed_time = datetime.now()
print("iteration ", self._iter_count)
def _identify_violation_indx(self, viol_indx, y_ref_boundaries):
"""determine the index where the violation occurs
violation means when the reference state falls off the specified beam while decoding
Args:
viol_indx: list of indices where violation occurred while decoding
y_ref_boundaries: boundaries of the labels/tags in the reference sequence
"""
# viol_index is 1-based indexing
counter = 0
for boundary in y_ref_boundaries:
__, v = boundary
if v >= viol_indx:
viol_pos = v
viol_boundindex = counter + 1
break
counter += 1
return (viol_pos, viol_boundindex)
def _compute_seq_decerror(self, y_ref, y_imposter, viol_pos):
"""compute the decoding error of a sequence
Args:
y_ref: reference sequence list of labels
y_imposter: imposter/decoded sequence list of labels
viol_pos: index where violation occurred,
it is identified using :func:`_identify_violation_indx` function
"""
# print("yref ", y_ref)
# print("y_imposter ", y_imposter)
# print("viol_pos ", viol_pos)
T = len(y_ref[:viol_pos])
# ^print("T ", T)
# ^print("viol_pos ", viol_pos)
missmatch = [i for i in range(T) if y_ref[i] != y_imposter[i]]
len_diff = len(missmatch)
# range of error is [0-1]
seq_err_count = float(len_diff / T)
return seq_err_count
def _unpack_windxfval(self, y_windxfval):
"""unpack the weight indices and corresponding feature values
Args:
y_windxfval: tuple having two numpy array entries; the first representing
the weight indices of the features while the second representing
the values that are feature sum/count
"""
windx, fval = y_windxfval
return (windx, fval)
def _find_update_violation(self, w, seq_id):
"""determine the *best* imposter sequence for weight updates
Args:
w: weight vector (numpy vector)
seq_id: integer representing unique id assigned to the sequence
"""
method = self.training_description["method"]
beam_size = self.training_description["beam_size"]
update_type = self.training_description["update_type"]
topK = self.training_description.get("topK")
crf_model = self.crf_model
seqs_info = crf_model.seqs_info
l = {"Y": (seq_id,)}
crf_model.check_cached_info(seq_id, l)
y_ref = seqs_info[seq_id]["Y"]["flat_y"]
y_ref_boundaries = seqs_info[seq_id]["Y"]["boundaries"]
if update_type in {"max-fast", "max-exhaustive", "latest"}:
early_stop = False
else:
early_stop = True
if not topK:
y_imposter, viol_indx = crf_model.viterbi(
w, seq_id, beam_size, early_stop, y_ref
)
y_imposters = [y_imposter]
else:
y_imposters, viol_indx = crf_model.viterbi(
w, seq_id, beam_size, early_stop, y_ref, topK
)
seq_err_count = None
ref_unp_windxfval = None
imps_unp_windxfval = None
# ^print("y_ref ", y_ref)
# ^print("y_imposter ", y_imposter)
# top decoded sequence
y_imposter = y_imposters[0]
if not viol_indx:
# we can perform full update
print("in full update routine ...")
T = seqs_info[seq_id]["T"]
seq_err_count = self._compute_seq_decerror(y_ref, y_imposter, T)
if seq_err_count or method == "SAPO":
ref_unp_windxfval, imps_unp_windxfval = self._load_gfeatures(
seq_id, "globalfeatures", y_imposters, T, len(y_ref_boundaries)
)
else:
if update_type == "early":
print("in early update routine ...")
# viol_index is 1-based indexing
earlyviol_indx = viol_indx[0]
viol_pos, viol_boundindex = self._identify_violation_indx(
earlyviol_indx, y_ref_boundaries
)
seq_err_count = self._compute_seq_decerror(y_ref, y_imposter, viol_pos)
ref_unp_windxfval, imps_unp_windxfval = self._load_gfeatures(
seq_id,
"globalfeatures_per_boundary",
y_imposters,
viol_pos,
viol_boundindex,
)
elif update_type == "max-exhaustive":
# max update is only supported for one imposter sequence
max_diff = numpy.inf
L = crf_model.model.L
print("in max-exhaustive update routine ...")
test = []
# viol_index is 1-based indexing
for i in range(len(viol_indx)):
indx = viol_indx[i]
if i == 0:
# case of early update index
if L > 1:
viol_pos, viol_boundindex = self._identify_violation_indx(
indx, y_ref_boundaries
)
else:
viol_pos = indx
viol_boundindex = viol_pos
seq_err_count = self._compute_seq_decerror(
y_ref, y_imposter, viol_pos
)
else:
if L > 1:
__, v = y_ref_boundaries[viol_boundindex]
viol_pos = v
viol_boundindex += 1
else:
viol_pos = indx
viol_boundindex = viol_pos
# seq_err_count = self._compute_seq_decerror(y_ref, y_imposter, viol_pos)
ref_unp_windxfval, imps_unp_windxfval = self._load_gfeatures(
seq_id,
"globalfeatures_per_boundary",
y_imposters,
viol_pos,
viol_boundindex,
)
ref_windx, ref_fval = ref_unp_windxfval
imp_windx, imp_fval = imps_unp_windxfval[0]
diff = numpy.dot(w[ref_windx], ref_fval) - numpy.dot(
w[imp_windx], imp_fval
)
test.append(diff)
# print("diff = {}, max_diff = {} ".format(diff, max_diff))
if diff <= max_diff:
# using less than or equal would allow for getting the longest sequence having max difference
max_diff = diff
ref_unp_windxfval = (ref_windx, ref_fval)
imp_unp_windxfval = (imp_windx, imp_fval)
imps_unp_windxfval = [imp_unp_windxfval]
# print("test ", test)
elif update_type == "max-fast":
# based on empirical observation, the last violation index (i.e. where the beam falls off) almost always yields the max violation
# this is a heuristic; for an exhaustive procedure, choose `max-exhaustive`
# max update is only supported for one imposter sequence
max_diff = numpy.inf
L = crf_model.model.L
print("in max-fast update routine ...")
# viol_index is 1-based indexing
lastviol_indx = viol_indx[-1]
viol_pos, viol_boundindex = self._identify_violation_indx(
lastviol_indx, y_ref_boundaries
)
seq_err_count = self._compute_seq_decerror(y_ref, y_imposter, viol_pos)
ref_unp_windxfval, imps_unp_windxfval = self._load_gfeatures(
seq_id,
"globalfeatures_per_boundary",
y_imposters,
viol_pos,
viol_boundindex,
)
elif update_type == "latest":
# to implement lastest update at some point..
pass
return (ref_unp_windxfval, imps_unp_windxfval, seq_err_count)
def _load_gfeatures(
self, seq_id, gfeatures_type, y_imposters, ypos_indx, boundpos_indx
):
"""load the global features of the reference and imposter/decoded sequence
Args:
seq_id: id of the sequence
gfeatures_type: determine the representation either aggregated or by boundary
y_imposters: list of imposter sequences
ypos_indx: index of the considered end of the label sequence
boundpos_indx: index of the boundary corresponding to the identified `ypos_indx`
"""
seg_other_symbol = self.training_description["seg_other_symbol"]
crf_model = self.crf_model
seqs_info = crf_model.seqs_info
y_ref_boundaries = seqs_info[seq_id]["Y"]["boundaries"]
if gfeatures_type == "globalfeatures":
per_boundary = False
y_ref_boundaries = None
else:
per_boundary = True
# to assign y_ref_boundries here -> y_ref_boundaries = y_ref_boundaries[:boundpos_indx]
l = {gfeatures_type: (seq_id, per_boundary)}
crf_model.check_cached_info(seq_id, l)
ref_gfeatures = seqs_info[seq_id][gfeatures_type]
if y_ref_boundaries:
y_ref_windxfval = crf_model.represent_globalfeature(
ref_gfeatures, y_ref_boundaries[:boundpos_indx]
)
else:
y_ref_windxfval = seqs_info[seq_id][gfeatures_type]
# ref_unp_windxfval = self._unpack_windxfval(y_ref_windxfval)
# generate global features for the imposters
imposters_windxfval = []
for y_imposter in y_imposters:
# generate global features for the current imposter
imposter_gfeatures_perboundary, y_imposter_boundaries = crf_model.load_imposter_globalfeatures(
seq_id, y_imposter[:ypos_indx], seg_other_symbol
)
# ^print("imposter_gfeatures_perboundary ", imposter_gfeatures_perboundary)
# ^print("imposter y_boundaries ", y_imposter_boundaries)
y_imposter_windxfval = crf_model.represent_globalfeature(
imposter_gfeatures_perboundary, y_imposter_boundaries
)
imposters_windxfval.append(y_imposter_windxfval)
return (y_ref_windxfval, imposters_windxfval)
def _update_weights_sapo(self, w, ref_unp_windxfval, imps_unp_windxfval, prob_vec):
"""update weight vector for the SAPO method
Args:
w: weight vector (numpy vector)
ref_unp_windxfval: tuple of two numpy array elements representing the weight indices
and corresponding feature sum/count of the reference sequence
imps_unp_windxfval: list of tuples each comprising two numpy array elements representing
the weight indices and corresponding feature sum/count of the imposter sequences
prob_vec: numpy vector representing the probability of each imposter sequence
"""
gamma = self.training_description["gamma"]
# update weights using the decoded sequences
for i in range(len(imps_unp_windxfval)):
windx, fval = imps_unp_windxfval[i]
w[windx] -= (gamma * prob_vec[i]) * fval
# update weights using the reference sequence
windx, fval = ref_unp_windxfval
w[windx] += gamma * fval
def _compute_probvec_sapo(self, w, imps_unp_windxfval):
"""compute the probabilty of each imposter sequence in the SAPO algorithm
Args:
w: weight vector (numpy vector)
imps_unp_windxfval: list of dictionaries (unpacked) representing the weight indices and corresponding feature sum/count
of the imposter sequences
"""
# normalize
num_imposters = len(imps_unp_windxfval)
ll_vec = numpy.zeros(num_imposters)
for i in range(num_imposters):
windx, fval = imps_unp_windxfval[i]
ll_vec[i] = numpy.dot(w[windx], fval)
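# softmax over the imposter scores, computed in log-space for numerical stability:
# prob_vec[i] = exp(ll_vec[i] - logsumexp(ll_vec))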
Z = vectorized_logsumexp(ll_vec)
prob_vec = numpy.exp(ll_vec - Z)
# print("prob_vec ", prob_vec)
return prob_vec
def _sapo(self, w, train_seqs_id):
"""implements Search-based Probabilistic Online Learning Algorithm (SAPO)
this implementation adapts it to 'violation-fixing' framework (i.e. inexact search is supported)
.. see::
original paper at https://arxiv.org/pdf/1503.08381v1.pdf
.. note::
the regularization is based on averaging rather than l2 as it seems to be consistent during training
while using exact or inexact search
"""
self._report_training()
num_epochs = self.training_description["num_epochs"]
# regularization_type = self.training_description["regularization_type"]
# regularization parameter lambda
# C = self.training_description['regularization_value']
# gamma = self.training_description['gamma']
shuffle_seq = self.training_description["shuffle_seq"]
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
N = len(train_seqs_id)
crf_model = self.crf_model
# instance variable to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._exitloop = False
avg_error_list = [0]
w_avg = numpy.zeros(len(w), dtype="longdouble")
for k in range(num_epochs):
seq_left = N
error_count = 0
if shuffle_seq:
numpy.random.shuffle(train_seqs_id)
for seq_id in train_seqs_id:
ref_unp_windxfval, imps_unp_windxfval, seq_err_count = self._find_update_violation(
w, seq_id
)
prob_vec = self._compute_probvec_sapo(w, imps_unp_windxfval)
self._update_weights_sapo(
w, ref_unp_windxfval, imps_unp_windxfval, prob_vec
)
# regularize the weights
# reg = -(C/N)* w
# w += gamma*reg
w_avg += w
crf_model.clear_cached_info([seq_id])
seq_left -= 1
# print('seq_err_count ', seq_err_count)
if seq_err_count:
error_count += seq_err_count
# print("error count {}".format(error_count))
print("sequences left {}".format(seq_left))
avg_error_list.append(float(error_count / N))
self._track_perceptron_optimizer(w, k, avg_error_list)
ReaderWriter.dump_data(
w_avg / ((k + 1) * N),
os.path.join(model_dir, "model_avgweights_epoch_{}".format(k + 1)),
)
print("average error : {}".format(avg_error_list[1:]))
# print("self._exitloop {}".format(self._exitloop))
if self._exitloop:
break
self._elapsed_time = datetime.now()
line = "---Model training--- end time {} \n".format(datetime.now())
ReaderWriter.log_progress(line, log_file)
w = w_avg / (num_epochs * N)
ReaderWriter.dump_data(
avg_error_list, os.path.join(model_dir, "avg_decodingerror_training")
)
return w
def _update_weights_perceptron(self, w, ref_unp_windxfval, imp_unp_windxfval):
"""update weight vector for the COLLINS-PERCEPTRON method
Args:
w: weight vector (numpy vector)
ref_unp_windxfval: tuple (unpacked) of the weight indices and corresponding feature sum/count
of the reference sequence
imp_unp_windxfval: tuple (unpacked) of the weight indices and corresponding feature sum/count
of the imposter sequence
"""
ref_windx, ref_fval = ref_unp_windxfval
imp_windx, imp_fval = imp_unp_windxfval
w[ref_windx] += ref_fval
w[imp_windx] -= imp_fval
def _structured_perceptron(self, w, train_seqs_id):
"""implements structured perceptron algorithm in particular the average perceptron
it was introduced by Michael Collins in 2002 (see his paper http://www.aclweb.org/anthology/W02-1001)
this implementation supports different averaging schemes for the weight learning
Args:
w: weight vector (numpy vector)
            train_seqs_id: list of integers representing the ids assigned to the training sequences
"""
self._report_training()
num_epochs = self.training_description["num_epochs"]
avg_scheme = self.training_description["avg_scheme"]
shuffle_seq = self.training_description["shuffle_seq"]
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
N = len(train_seqs_id)
crf_model = self.crf_model
# instance variable to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._exitloop = False
if avg_scheme in {"avg_error", "avg_uniform"}:
# accumulated sum of estimated weights
w_avg = numpy.zeros(len(w), dtype="longdouble")
avg_error_list = [0]
num_upd = 0
for k in range(num_epochs):
seq_left = N
error_count = 0
if shuffle_seq:
numpy.random.shuffle(train_seqs_id)
for seq_id in train_seqs_id:
print("sequences left {}".format(seq_left))
ref_unp_windxfval, imps_unp_windxfval, seq_err_count = self._find_update_violation(
w, seq_id
)
                    # if decoding errors occur with the current weights
                    # print("seq_err_count ", seq_err_count)
                    # print("y_ref_windxfval ", y_ref_windxfval)
if seq_err_count:
error_count += seq_err_count
if avg_scheme == "avg_error":
                            # give more weight to parameter vectors that had a small average decoding error per sequence
w_avg += (1 - seq_err_count) * w
num_upd += 1 - seq_err_count
else:
w_avg += w
num_upd += 1
# update current weight
self._update_weights_perceptron(
w, ref_unp_windxfval, imps_unp_windxfval[0]
)
crf_model.clear_cached_info([seq_id])
seq_left -= 1
# print("error count {}".format(error_count))
avg_error_list.append(float(error_count / N))
self._track_perceptron_optimizer(w, k, avg_error_list)
if num_upd:
w_dump = w_avg / num_upd
else:
w_dump = w_avg
ReaderWriter.dump_data(
w_dump,
os.path.join(model_dir, "model_avgweights_epoch_{}".format(k + 1)),
)
print("average error : {}".format(avg_error_list[1:]))
# print("self._exitloop {}".format(self._exitloop))
if self._exitloop:
break
self._elapsed_time = datetime.now()
if num_upd:
w = w_avg / num_upd
line = "---Model training--- end time {} \n".format(datetime.now())
ReaderWriter.log_progress(line, log_file)
ReaderWriter.dump_data(
avg_error_list, os.path.join(model_dir, "avg_decodingerror_training")
)
return w
def _track_perceptron_optimizer(self, w, k, avg_error_list):
"""track search based optimized (such as SAPO and COLLINS-PERCEPTRON) by logging each iteration
Args:
w: weight vector (numpy vector)
k: current epoch
            avg_error_list: list of the decoding errors in each of the previous epochs
"""
delta_time = datetime.now() - self._elapsed_time
self._check_reldiff(avg_error_list[-2], avg_error_list[-1])
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
line = "--- Iteration {} --- \n".format(k + 1)
line += "Average percentage of decoding error: {} \n".format(
avg_error_list[-1] * 100
)
line += "Number of seconds spent: {} \n".format(delta_time.total_seconds())
ReaderWriter.log_progress(line, log_file)
# dump the learned weights for every pass
ReaderWriter.dump_data(
w, os.path.join(model_dir, "model_weights_epoch_{}".format(k + 1))
)
def _sga_adadelta(self, w, train_seqs_id):
"""implements stochastic gradient ascent using adaptive approach of ADADELTA
the original paper is found in https://arxiv.org/abs/1212.5701
Args:
w: weight vector (numpy vector)
            train_seqs_id: list of integers representing the ids assigned to the training sequences
"""
self._report_training()
crf_model = self.crf_model
num_epochs = self.training_description["num_epochs"]
regularization_type = self.training_description["regularization_type"]
# regularization parameter lambda
C = self.training_description["regularization_value"]
# number of training sequences
N = len(train_seqs_id)
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
# keeps track of the log-likelihood of a sequence before weight updating
seqs_loglikelihood_vec = numpy.zeros(N)
seqs_id_mapper = {
seq_id: unique_id for unique_id, seq_id in enumerate(train_seqs_id)
}
# step size decides the number of data points to average in the seqs_loglikelihood_vec
# using 10% of data points
step_size = round(N * 0.1)
if step_size == 0:
step_size = 1
mean_cost_vec = [0]
p_rho = self.training_description["p_rho"]
epsilon = self.training_description["epsilon"]
E_g2 = numpy.zeros(len(w), dtype="longdouble")
E_deltaw2 = numpy.zeros(len(w), dtype="longdouble")
if regularization_type == "l1":
u = 0
q = numpy.zeros(len(w), dtype="longdouble")
# gradient
grad = numpy.zeros(len(w), dtype="longdouble")
# instance variable to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._exitloop = False
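        # ADADELTA recursions applied per sequence in the loop below
        # (rho = p_rho, eps = epsilon, RMS(x) = sqrt(E[x^2] + eps)):
        #   E[g^2]   <- rho * E[g^2]   + (1 - rho) * g^2
        #   delta_w   = (RMS(delta_w) / RMS(g)) * g
        #   E[dw^2]  <- rho * E[dw^2]  + (1 - rho) * delta_w^2
        #   w        <- w + delta_w    (ascent direction, since the loglikelihood is maximized)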
for k in range(num_epochs):
# shuffle sequences at the beginning of each epoch
numpy.random.shuffle(train_seqs_id)
numseqs_left = N
print("k ", k)
for seq_id in train_seqs_id:
# print(seq_id)
# print("first seqs_info[{}]={}".format(seq_id, crf_model.seqs_info[seq_id]))
seq_loglikelihood = crf_model.compute_seq_loglikelihood(w, seq_id)
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
target_indx = crf_model.compute_seq_gradient(w, seq_id, grad)
if C:
if regularization_type == "l2":
seq_loglikelihood += -((C / N) * (1 / 2) * numpy.dot(w, w))
grad -= (C / N) * w
elif regularization_type == "l1":
seq_loglikelihood += -(C / N) * numpy.sum(numpy.abs(w))
# update the computed sequence loglikelihood by adding the regularization term contribution
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
# accumulate gradient
E_g2 = p_rho * E_g2 + (1 - p_rho) * numpy.square(grad)
RMS_g = numpy.sqrt(E_g2 + epsilon)
RMS_deltaw = numpy.sqrt(E_deltaw2 + epsilon)
ratio = RMS_deltaw / RMS_g
deltaw = ratio * grad
E_deltaw2 = p_rho * E_deltaw2 + (1 - p_rho) * numpy.square(deltaw)
w += deltaw
if regularization_type == "l1":
u += ratio * (C / N)
w_upd, q_upd = self._apply_l1_penalty(w, q, u, target_indx)
w = w_upd
q = q_upd
else:
# accumulate gradient
fval = grad[target_indx]
E_g2 = p_rho * E_g2
E_g2[target_indx] += (1 - p_rho) * numpy.square(fval)
RMS_g = numpy.sqrt(E_g2 + epsilon)
RMS_deltaw = numpy.sqrt(E_deltaw2 + epsilon)
ratio = RMS_deltaw / RMS_g
deltaw = ratio[target_indx] * fval
E_deltaw2 = p_rho * E_deltaw2
E_deltaw2[target_indx] += (1 - p_rho) * numpy.square(deltaw)
w[target_indx] += deltaw
# print("second seqs_info[{}]={}".format(seq_id, crf_model.seqs_info[seq_id]))
# clean cached info
crf_model.clear_cached_info([seq_id])
numseqs_left -= 1
# print("third seqs_info[{}]={}".format(seq_id, crf_model.seqs_info[seq_id]))
# reset the gradient
grad.fill(0)
print("num seqs left: {}".format(numseqs_left))
seqs_cost_vec = [
numpy.mean(seqs_loglikelihood_vec[i : i + step_size])
for i in range(0, N, step_size)
]
# to consider plotting this vector
mean_cost_vec.append(numpy.mean(seqs_loglikelihood_vec))
self._track_sga_optimizer(w, seqs_cost_vec, mean_cost_vec, k)
if self._exitloop:
break
self._elapsed_time = datetime.now()
line = "---Model training--- end time {} \n".format(datetime.now())
ReaderWriter.log_progress(line, log_file)
ReaderWriter.dump_data(
mean_cost_vec, os.path.join(model_dir, "avg_loglikelihood_training")
)
return w
def _sga_classic(self, w, train_seqs_id):
"""implements stochastic gradient ascent
Args:
w: weight vector (numpy vector)
            train_seqs_id: list of integers representing the ids assigned to the training sequences
"""
self._report_training()
crf_model = self.crf_model
num_epochs = self.training_description["num_epochs"]
regularization_type = self.training_description["regularization_type"]
# regularization parameter lambda
C = self.training_description["regularization_value"]
# number of training sequences
N = len(train_seqs_id)
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
# keeps track of the log-likelihood of a sequence before weight updating
seqs_loglikelihood_vec = numpy.zeros(N)
seqs_id_mapper = {
seq_id: unique_id for unique_id, seq_id in enumerate(train_seqs_id)
}
# step size decides the number of data points to average in the seqs_loglikelihood_vec
# using 10% of data points
step_size = round(N * 0.1)
if step_size == 0:
step_size = 1
mean_cost_vec = [0]
# instance variable to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._exitloop = False
if regularization_type == "l1":
u = 0
q = numpy.zeros(len(w), dtype="longdouble")
learning_rate_schedule = self.training_description["learning_rate_schedule"]
t0 = self.training_description["t0"]
        # decay parameter a must be strictly between 0 and 1 (0 < a < 1)
a = self.training_description["a"]
t = 0
# gradient
grad = numpy.zeros(len(w), dtype="longdouble")
for k in range(num_epochs):
# shuffle sequences at the beginning of each epoch
numpy.random.shuffle(train_seqs_id)
numseqs_left = N
for seq_id in train_seqs_id:
# compute/update learning rate
if learning_rate_schedule == "bottu":
eta = C / (t0 + t)
elif learning_rate_schedule == "exponential_decay":
eta = t0 * a ** (t / N)
elif learning_rate_schedule == "t_inverse":
eta = t0 / (1 + a * (t / N))
elif learning_rate_schedule == "constant":
eta = t0
# print("eta {}".format(eta))
# print(seq_id)
seq_loglikelihood = crf_model.compute_seq_loglikelihood(w, seq_id)
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
target_index = crf_model.compute_seq_gradient(w, seq_id, grad)
# print("seq_grad {}".format(seq_grad))
if C:
if regularization_type == "l2":
seq_loglikelihood += -((C / N) * (1 / 2) * numpy.dot(w, w))
grad -= (C / N) * w
w += eta * grad
elif regularization_type == "l1":
seq_loglikelihood += -(C / N) * numpy.sum(numpy.abs(w))
u += eta * (C / N)
w_upd, q_upd = self._apply_l1_penalty(w, q, u, target_index)
w = w_upd
q = q_upd
# update the computed sequence loglikelihood by adding the regularization term contribution
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
else:
# print("fval {}".format(fval))
w[target_index] += eta * grad[target_index]
t += 1
# clean cached info
crf_model.clear_cached_info([seq_id])
# reset the gradient
grad.fill(0)
numseqs_left -= 1
print("num seqs left: {}".format(numseqs_left))
seqs_cost_vec = [
numpy.mean(seqs_loglikelihood_vec[i : i + step_size])
for i in range(0, N, step_size)
]
# to consider plotting this vector
mean_cost_vec.append(numpy.mean(seqs_loglikelihood_vec))
self._track_sga_optimizer(w, seqs_cost_vec, mean_cost_vec, k)
if self._exitloop:
break
self._elapsed_time = datetime.now()
line = "---Model training--- end time {} \n".format(datetime.now())
ReaderWriter.log_progress(line, log_file)
ReaderWriter.dump_data(
mean_cost_vec, os.path.join(model_dir, "avg_loglikelihood_training")
)
return w
def _sga_svrg(self, w, train_seqs_id):
"""implements the stochastic variance reduced gradient
The algorithm is reported in `Johnson R, Zhang T. Accelerating Stochastic Gradient Descent using Predictive Variance Reduction.
<https://papers.nips.cc/paper/4937-accelerating-stochastic-gradient-descent-using-predictive-variance-reduction.pdf>`__
Args:
w: weight vector (numpy vector)
            train_seqs_id: list of integers representing the ids assigned to the training sequences
"""
# keep the original number of epochs requested
num_epochs = self.training_description["num_epochs"]
# run stochastic gradient ascent to initialize the weights
self.training_description["num_epochs"] = 1
# current snapshot of w (i.e. w tilda)
w_tilda_c = self._sga_classic(w, train_seqs_id)
self.cleanup()
self.training_description["num_epochs"] = num_epochs
crf_model = self.crf_model
regularization_type = self.training_description["regularization_type"]
# regularization parameter lambda
C = self.training_description["regularization_value"]
# number of training sequences
N = len(train_seqs_id)
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
# keeps track of the log-likelihood of a sequence before weight updating
seqs_loglikelihood_vec = numpy.zeros(N)
seqs_id_mapper = {
seq_id: unique_id for unique_id, seq_id in enumerate(train_seqs_id)
}
# step size decides the number of data points to average in the seqs_loglikelihood_vec
# using 10% of data points
step_size = round(N * 0.1)
if step_size == 0:
step_size = 1
mean_cost_vec = [0]
if regularization_type == "l1":
u = 0
q = numpy.zeros(len(w), dtype="longdouble")
eta = self.training_description["t0"]
m = 2 * N
saved_grad = {}
# gradient
grad = numpy.zeros(len(w), dtype="longdouble")
# instance variable to keep track of elapsed time between optimization iterations
self._elapsed_time = datetime.now()
self._exitloop = False
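        # Each SVRG stage below works in two phases:
        #   (i) compute the full (average) gradient mu_grad at the snapshot w_tilda_c,
        #       caching every per-sequence gradient in saved_grad;
        #   (ii) run m inner updates on randomly drawn sequences of the form
        #           w <- w + eta * (grad_i(w) - grad_i(w_tilda_c) + mu_grad)
        #       which stays unbiased while reducing the variance of the update;
        # the snapshot is refreshed with the current w at the end of each stage.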
for s in range(num_epochs):
print("stage {}".format(s))
# ###################################
# compute the average gradient using the snapshot of w (i.e. w tilda)
mu_grad = numpy.zeros(len(w_tilda_c), dtype="longdouble")
# compute average gradient
seqs_left = N
for seq_id in train_seqs_id:
target_indx = crf_model.compute_seq_gradient(w_tilda_c, seq_id, grad)
fval = grad[target_indx]
mu_grad[target_indx] += fval
crf_model.clear_cached_info([seq_id])
saved_grad[seq_id] = (target_indx, fval)
# reset grad
grad.fill(0)
seqs_left -= 1
print("average gradient phase: {} seqs left".format(seqs_left))
mu_grad /= N
#######################################
w = numpy.copy(w_tilda_c)
for t in range(m):
seq_id = numpy.random.choice(train_seqs_id, 1)[0]
print("round {} out of {}".format(t + 1, m))
seq_loglikelihood = crf_model.compute_seq_loglikelihood(w, seq_id)
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
target_indx = crf_model.compute_seq_gradient(w, seq_id, grad)
fval = grad[target_indx]
if C:
if regularization_type == "l2":
seq_loglikelihood += -((C / N) * (1 / 2) * numpy.dot(w, w))
grad -= (C / N) * w
grad[saved_grad[seq_id][0]] -= saved_grad[seq_id][1]
grad += mu_grad
w += eta * grad
elif regularization_type == "l1":
seq_loglikelihood += -(C / N) * numpy.sum(numpy.abs(w))
u += eta * (C / N)
grad[saved_grad[seq_id][0]] -= saved_grad[seq_id][1]
grad += mu_grad
w_upd, q_upd = self._apply_l1_penalty(w, q, u, target_indx)
w = w_upd
q = q_upd
# update the computed sequence loglikelihood by adding the regularization term contribution
seqs_loglikelihood_vec[seqs_id_mapper[seq_id]] = seq_loglikelihood
else:
w[target_indx] += eta * (fval - saved_grad[seq_id][1])
w += eta * mu_grad
t += 1
# clean cached info
crf_model.clear_cached_info([seq_id])
grad.fill(0)
w_tilda_c = w
seqs_cost_vec = [
numpy.mean(seqs_loglikelihood_vec[i : i + step_size])
for i in range(0, N, step_size)
]
# to consider plotting this vector
mean_cost_vec.append(numpy.mean(seqs_loglikelihood_vec))
self._track_sga_optimizer(w, seqs_cost_vec, mean_cost_vec, s)
if self._exitloop:
break
self._elapsed_time = datetime.now()
line = "---Model training--- end time {} \n".format(datetime.now())
ReaderWriter.log_progress(line, log_file)
ReaderWriter.dump_data(
mean_cost_vec, os.path.join(model_dir, "avg_loglikelihood_training")
)
return w
def _apply_l1_penalty(self, w, q, u, w_indx):
"""apply l1 regularization to the weights
        it uses the cumulative-penalty approach of Tsuruoka et al., "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty"
Args:
w: weight vector (numpy vector)
            q: total L1 penalty that the current weights (corresponding to the features) have received up to the current time
u: absolute value of total L1 penalty that each weight could receive up to the current time
w_indx: weight indices corresponding to the current features under update
TODO: vectorize this function
"""
for indx in w_indx:
z = w[indx]
# print("z is {}".format(z))
# print("q[indx] is {}".format(q[indx]))
if w[indx] > 0:
# print("we want the max between 0 and {}".format(w[indx] - (u + q[indx])))
w[indx] = numpy.max([0, w[indx] - (u + q[indx])])
elif w[indx] < 0:
# print("we want the min between 0 and {}".format(w[indx] + (u - q[indx])))
w[indx] = numpy.min([0, w[indx] + (u - q[indx])])
# print("z is {}".format(z))
# print("w[indx] is {}".format(w[indx]))
q[indx] = q[indx] + (w[indx] - z)
            # print("q[indx] becomes {}".format(q[indx]))
        return (w, q)
def _track_sga_optimizer(self, w, seqs_loglikelihood, mean_loglikelihood, k):
"""track stochastic gradient ascent optimizers by logging each iteration
Args:
w: weight vector (numpy vector)
seqs_loglikelihood: numpy vector representing the average loglikelihood of batches of sequences
mean_loglikelihood: mean of the seqs_loglikelihood vector
k: current epoch
"""
delta_time = datetime.now() - self._elapsed_time
self._check_reldiff(mean_loglikelihood[-2], mean_loglikelihood[-1])
epoch_num = k
# log file
model_dir = self.training_description["model_dir"]
log_file = os.path.join(model_dir, "crf_training_log.txt")
line = "--- Epoch/pass {} --- \n".format(epoch_num + 1)
line += "Estimated training cost (average loglikelihood) is {} \n".format(
mean_loglikelihood[-1]
)
line += "Number of seconds spent: {} \n".format(delta_time.total_seconds())
ReaderWriter.log_progress(line, log_file)
def cleanup(self):
"""End of training -- cleanup"""
# reset iteration counter
self._iter_count = None
# reset elapsed time between iterations
self._elapsed_time = None
self._exitloop = None
class SeqDecodingEvaluator(object):
"""Evaluator class to evaluate performance of the models
Args:
model_repr: the CRF model representation that has a suffix of `ModelRepresentation`
such as :class:`HOCRFADModelRepresentation`
Attributes:
model_repr: the CRF model representation that has a suffix of `ModelRepresentation`
such as :class:`HOCRFADModelRepresentation`
.. note::
        this class does not support evaluation of segment learning (i.e. label notations such as IOB2/BIO)
"""
def __init__(self, model_repr):
self.model_repr = model_repr
def compute_states_confmatrix(self, Y_seqs_dict):
"""compute/generate the confusion matrix for each state
Args:
Y_seqs_dict: dictionary where each sequence has the reference label sequence
and its corresponding predicted sequence. It has the following form
``{seq_id:{'Y_ref':[reference_ylabels], 'Y_pred':[predicted_ylabels]}}``
"""
Y_codebook = self.model_repr.Y_codebook
M = len(Y_codebook)
# add another state in case unseen states occur in the test data
self.model_confusion_matrix = numpy.zeros((M + 1, M + 1), dtype="float")
for seq_id in Y_seqs_dict:
Y_pred = Y_seqs_dict[seq_id]["Y_pred"]
Y_ref = Y_seqs_dict[seq_id]["Y_ref"]
self._compute_model_confusionmatrix(
self.map_states_to_num(Y_ref, Y_codebook, M),
self.map_states_to_num(Y_pred, Y_codebook, M),
)
statelevel_confmatrix = self._generate_statelevel_confusion_matrix()
return statelevel_confmatrix
def _generate_statelevel_confusion_matrix(self):
model_confusion_matrix = self.model_confusion_matrix
num_states = model_confusion_matrix.shape[0]
total = model_confusion_matrix.sum()
statelevel_confmatrix = numpy.zeros((num_states, 2, 2), dtype="float")
for i in range(num_states):
tp = model_confusion_matrix[i, i]
fp = model_confusion_matrix[i, :].sum() - tp
fn = model_confusion_matrix[:, i].sum() - tp
tn = total - (tp + fp + fn)
statelevel_confmatrix[i] = numpy.array([[tp, fn], [fp, tn]])
return statelevel_confmatrix
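    # Layout note: statelevel_confmatrix[i] is the binary confusion matrix of state i
    # derived from the model-level (M+1)x(M+1) matrix, arranged as
    #   [[tp, fn],
    #    [fp, tn]]
    # which is the orientation expected by get_performance_metric below.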
def get_performance_metric(self, taglevel_performance, metric, exclude_states=[]):
"""compute the performance of the model using a requested metric
Args:
            taglevel_performance: `numpy` array with Mx2x2 dimension. For every state code a 2x2 confusion matrix
                                  is included. It is computed using :func:`compute_states_confmatrix`
metric: evaluation metric that could take one of ``{'f1', 'precision', 'recall', 'accuracy'}``
Keyword Arguments:
exclude_states: list (default empty list) of states to exclude from the computation. Usually, in NER applications the non-entity symbol
such as 'O' is excluded from the computation. Example: If ``exclude_states = ['O']``, this will replicate the behavior of `conlleval script <http://www.cnts.ua.ac.be/conll2000/chunking/output.html>`__
"""
Y_codebook = self.model_repr.Y_codebook
# do not include 'exclude states' in the computation
exclude_indices = [Y_codebook[state] for state in exclude_states]
# total number of states plus 1
M = len(Y_codebook) + 1
include_indices = list(set(range(M)) - set(exclude_indices))
# perform sum across all layers to get micro-average
collapsed_performance = taglevel_performance[include_indices].sum(axis=0)
# print("collapsed performance \n {}".format(collapsed_performance))
tp = collapsed_performance[0, 0]
fp = collapsed_performance[1, 0]
fn = collapsed_performance[0, 1]
tn = collapsed_performance[1, 1]
perf_measure = 0
try:
if metric == "f1":
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = (2 * precision * recall) / (precision + recall)
print("f1 {}".format(f1))
perf_measure = f1
elif metric == "precision":
precision = tp / (tp + fp)
print("precision {}".format(precision))
perf_measure = precision
elif metric == "recall":
recall = tp / (tp + fn)
print("recall {}".format(recall))
perf_measure = recall
elif metric == "accuracy":
accuracy = (tp + tn) / (tp + fp + fn + tn)
print("accuracy {}".format(accuracy))
perf_measure = accuracy
except ZeroDivisionError as e:
print("dividing by Zero: check/investigate the confusion matrix")
finally:
return perf_measure
def map_states_to_num(self, Y, Y_codebook, M):
"""map states to their code/number using the `Y_codebook`
Args:
Y: list representing label sequence
Y_codebook: dictionary containing the states as keys and the assigned unique code as values
M: number of states
        .. note:: tags that did not occur in the training data are all mapped to one extra index equal to len(Y_codebook) (i.e. M)
"""
Y_coded = [Y_codebook[state] if state in Y_codebook else M for state in Y]
# print("Y_coded {}".format(Y_coded))
return Y_coded
def _compute_model_confusionmatrix(self, Y_ref, Y_pred):
"""compute confusion matrix on the level of the tag/state
Args:
Y_ref: list of reference label sequence (represented by the states code)
Y_pred: list of predicted label sequence (represented by the states code)
"""
Y_ref = numpy.asarray(Y_ref)
Y_pred = numpy.asarray(Y_pred)
model_confusion_matrix = self.model_confusion_matrix
for i in range(len(Y_ref)):
ref_state = Y_ref[i]
pred_state = Y_pred[i]
model_confusion_matrix[ref_state, pred_state] += 1
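# Hypothetical usage sketch for SeqDecodingEvaluator (not part of the original module);
# it assumes `model_repr` is an already-built model representation exposing `Y_codebook`:
#
#     evaluator = SeqDecodingEvaluator(model_repr)
#     Y_seqs_dict = {1: {"Y_ref": ["A", "B"], "Y_pred": ["A", "A"]}}
#     confmat = evaluator.compute_states_confmatrix(Y_seqs_dict)
#     f1 = evaluator.get_performance_metric(confmat, "f1")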
class Evaluator(object):
"""Evaluator class to evaluate performance of the models
Args:
model_repr: the CRF model representation that has a suffix of `ModelRepresentation`
such as :class:`HOCRFADModelRepresentation`
Attributes:
model_repr: the CRF model representation that has a suffix of `ModelRepresentation`
such as :class:`HOCRFADModelRepresentation`
.. note::
        this class is **EXPERIMENTAL/work in progress** and does not support evaluation of segment learning.
Use instead :class:`SeqDecodingEvaluator` for evaluating models learned using **sequence** learning.
"""
def __init__(self, model_repr):
self.model_repr = model_repr
def transform_codebook(self, Y_codebook, prefixes):
"""map states coded in BIO notation to their original states value
Args:
Y_codebook: dictionary of states each assigned a unique integer
prefixes: tuple of prefix notation used such as ("B-","I-") for BIO
"""
state_mapper = {}
for state in Y_codebook:
if state != "O":
for prefix in prefixes:
elems = state.split(prefix)
if len(elems) > 1:
new_state = elems[-1]
state_mapper[state] = new_state
break
else:
state_mapper[state] = state
return state_mapper
def compute_model_performance(
self, Y_seqs_dict, metric, output_file, states_notation
):
r"""compute the performance of the model
Args:
Y_seqs_dict: dictionary where each sequence has the reference label sequence
and its corresponding predicted sequence. It has the following form
``{seq_id:{'Y_ref':[reference_ylabels], 'Y_pred':[predicted_ylabels]}}``
metric: evaluation metric that could take one of {'f1', 'precision', 'recall', 'accuracy'}
output_file: file where to output the evaluation result
states_notation: notation used to code the state (i.e. BIO)
"""
Y_codebook = self.model_repr.Y_codebook
if states_notation == "BIO":
prefixes = ("B-", "I-")
state_mapper = self.transform_codebook(Y_codebook, prefixes)
transformed_codebook = {}
counter = 0
for new_state in state_mapper.values():
if new_state not in transformed_codebook:
transformed_codebook[new_state] = counter
counter += 1
else:
state_mapper = {state: state for state in Y_codebook}
transformed_codebook = Y_codebook
transformed_codebook_rev = {
code: state for state, code in transformed_codebook.items()
}
# ^print("original Y_codebook ", Y_codebook)
# ^print("state_mapper ", state_mapper)
# ^print("transformed_codebook ", transformed_codebook)
M = len(transformed_codebook)
# add another state in case unseen states occur in the test data
model_taglevel_performance = numpy.zeros((M + 1, 2, 2))
for seq_id in Y_seqs_dict:
Y_pred = Y_seqs_dict[seq_id]["Y_pred"]
Y_ref = Y_seqs_dict[seq_id]["Y_ref"]
# ^print("Y_pred ", Y_pred)
# ^print("Y_ref ", Y_ref)
taglevel_performance = self.compute_tags_confusionmatrix(
self.map_states_to_num(Y_ref, state_mapper, transformed_codebook, M),
self.map_states_to_num(Y_pred, state_mapper, transformed_codebook, M),
transformed_codebook_rev,
M,
)
# print("taglevel_performance {}".format(taglevel_performance))
# print("tagging performance \n {}".format(taglevel_performance))
model_taglevel_performance += taglevel_performance
# ^print("model_taglevel_performance ", model_taglevel_performance)
# perform sum across all layers to get micro-average
collapsed_performance = model_taglevel_performance.sum(axis=0)
# print("collapsed performance \n {}".format(collapsed_performance))
tp = collapsed_performance[0, 0]
fp = collapsed_performance[0, 1]
fn = collapsed_performance[1, 0]
tn = collapsed_performance[1, 1]
perf_measure = 0
if metric == "f1":
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * ((precision * recall) / (precision + recall))
print("f1 {}".format(f1))
perf_measure = f1
elif metric == "precision":
precision = tp / (tp + fp)
print("precision {}".format(precision))
perf_measure = precision
elif metric == "recall":
recall = tp / (tp + fn)
print("recall {}".format(recall))
perf_measure = recall
elif metric == "accuracy":
accuracy = (tp + tn) / (tp + fp + fn + tn)
print("accuracy {}".format(accuracy))
perf_measure = accuracy
with open(output_file, mode="w") as f:
f.write(
"The performance of the model based on the {} measure is {}\n".format(
metric, perf_measure
)
)
f.write(
"Confusion matrix: tp:{} fp:{} fn:{} tn:{}\n".format(tp, fp, fn, tn)
)
return perf_measure
def map_states_to_num(self, Y, state_mapper, transformed_codebook, M):
"""map states to their code/number using the `Y_codebook`
Args:
Y: list representing label sequence
            state_mapper: mapper between the old states and the new states generated from the :func:`transform_codebook` method
            transformed_codebook: the transformed codebook of the newly identified states
            M: number of states
        .. note:: tags that did not occur in the training data are all mapped to one extra index (i.e. M)
"""
# Y_coded = []
# for state in Y:
# mapped_state = state_mapper[state]
# if(mapped_state in transformed_codebook):
# Y_coded.append(transformed_codebook[mapped_state])
# else:
# Y_coded.append(M)
Y_coded = [
transformed_codebook[state_mapper[state]]
if state_mapper.get(state) in transformed_codebook
else M
for state in Y
]
# print("Y_coded {}".format(Y_coded))
return Y_coded
def compute_tags_confusionmatrix(self, Y_ref, Y_pred, transformed_codebook_rev, M):
"""compute confusion matrix on the level of the tag/state
Args:
Y_ref: list of reference label sequence (represented by the states code)
Y_pred: list of predicted label sequence (represented by the states code)
            transformed_codebook_rev: reverse mapping (code to state) of the transformed codebook
M: number of states
"""
# print("Y_ref coded ", Y_ref)
# print("Y_pred coded ", Y_pred)
detected_statescode = set(Y_ref)
Y_ref = numpy.asarray(Y_ref)
Y_pred = numpy.asarray(Y_pred)
# print("Y_ref as numpy array {}".format(Y_ref))
tagslevel_performance = numpy.zeros((M + 1, 2, 2))
for statecode in detected_statescode:
# get all indices of the target tag (gold-standard)
tag_indx_origin = numpy.where(Y_ref == statecode)[0]
# get all indices of the target tag (predicted)
tag_indx_pred = numpy.where(Y_pred == statecode)[0]
tag_tp = len(numpy.where(numpy.in1d(tag_indx_origin, tag_indx_pred))[0])
tag_fn = len(tag_indx_origin) - tag_tp
other_indx_origin = numpy.where(Y_ref != statecode)[0]
tag_fp = len(numpy.where(numpy.in1d(other_indx_origin, tag_indx_pred))[0])
tag_tn = len(other_indx_origin) - tag_fp
tagslevel_performance[statecode] = numpy.array(
[[tag_tp, tag_fp], [tag_fn, tag_tn]]
)
return tagslevel_performance
if __name__ == "__main__":
pass
|
py | 7dfbacb89085f68c2ba067f5582fa3145c0a8f87 | import os
from kombu import Exchange, Queue
class Config(object):
NOTIFICATION_QUEUE_PREFIX = os.getenv('NOTIFICATION_QUEUE_PREFIX')
FTP_HOST = os.getenv('FTP_HOST')
FTP_USERNAME = os.getenv('FTP_USERNAME')
FTP_PASSWORD = os.getenv('FTP_PASSWORD')
# Logging
DEBUG = False
LOGGING_STDOUT_JSON = os.getenv('LOGGING_STDOUT_JSON') == '1'
###########################
# Default config values ###
###########################
NOTIFY_APP_NAME = 'api'
AWS_REGION = os.getenv('AWS_REGION', 'eu-west-1')
NOTIFY_LOG_PATH = os.getenv('NOTIFY_LOG_PATH', '/var/log/notify/application.log')
CELERY = {
'broker_url': 'sqs://',
'broker_transport_options': {
'region': AWS_REGION,
'visibility_timeout': 310,
'queue_name_prefix': NOTIFICATION_QUEUE_PREFIX,
'wait_time_seconds': 20, # enable long polling, with a wait time of 20 seconds
},
'timezone': 'Europe/London',
'imports': ['app.celery.tasks'],
'task_queues': [
Queue('process-ftp-tasks', Exchange('default'), routing_key='process-ftp-tasks')
],
        # restart each worker process after it has executed `worker_max_tasks_per_child` tasks - this will help prevent
        # any memory leaks (not that we should be encouraging sloppy memory management). Since we only run a handful of
        # tasks per day, and none are time sensitive, the extra couple of seconds overhead isn't seen to be a huge issue.
'worker_max_tasks_per_child': 20
}
STATSD_ENABLED = False
STATSD_HOST = "statsd.hostedgraphite.com"
STATSD_PORT = 8125
LOCAL_FILE_STORAGE_PATH = "~/dvla-file-storage"
DVLA_JOB_BUCKET_NAME = None
DVLA_API_BUCKET_NAME = None
LETTERS_PDF_BUCKET_NAME = None
######################
# Config overrides ###
######################
class Development(Config):
DEBUG = True
NOTIFICATION_QUEUE_PREFIX = 'development'
NOTIFY_LOG_PATH = 'application.log'
LOCAL_FILE_STORAGE_PATH = "/tmp/dvla-file-storage"
DVLA_JOB_BUCKET_NAME = 'development-dvla-file-per-job'
DVLA_API_BUCKET_NAME = 'development-dvla-letter-api-files'
LETTERS_PDF_BUCKET_NAME = 'development-letters-pdf'
class Test(Development):
STATSD_ENABLED = False
LOCAL_FILE_STORAGE_PATH = "/tmp/dvla-file-storage"
DVLA_JOB_BUCKET_NAME = 'test-dvla-file-per-job'
DVLA_API_BUCKET_NAME = 'test-dvla-letter-api-files'
LETTERS_PDF_BUCKET_NAME = 'test-letters-pdf'
class Preview(Config):
DVLA_JOB_BUCKET_NAME = 'preview-dvla-file-per-job'
DVLA_API_BUCKET_NAME = 'preview-dvla-letter-api-files'
LETTERS_PDF_BUCKET_NAME = 'preview-letters-pdf'
class Staging(Config):
STATSD_ENABLED = False
DVLA_JOB_BUCKET_NAME = 'staging-dvla-file-per-job'
DVLA_API_BUCKET_NAME = 'staging-dvla-letter-api-files'
LETTERS_PDF_BUCKET_NAME = 'staging-letters-pdf'
class Production(Config):
STATSD_ENABLED = False
DVLA_JOB_BUCKET_NAME = 'production-dvla-file-per-job'
DVLA_API_BUCKET_NAME = 'production-dvla-letter-api-files'
LETTERS_PDF_BUCKET_NAME = 'production-letters-pdf'
configs = {
'development': Development,
'test': Test,
'preview': Preview,
'staging': Staging,
'live': Production,
'production': Production,
}
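# Typical selection pattern (a sketch only -- the NOTIFY_ENVIRONMENT variable name and the
# Flask-style `application` object are assumptions, not something defined in this module):
#
#     config_class = configs[os.getenv('NOTIFY_ENVIRONMENT', 'development')]
#     application.config.from_object(config_class)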
|
py | 7dfbaccf6ece859f186a99a5e60ccdbfcd55cd0a | """
Tests for the generic MLEModel
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
kalman_smoother)
from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.datasets import nile
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
kwargs = {
'k_states': 1, 'design': [[1]], 'transition': [[1]],
'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
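# These kwargs define the simplest possible univariate state space model: a single state
# with design, transition, selection and state covariance all equal to 1 (a random walk
# observed without measurement error) under an approximate diffuse initialization. Most
# tests below reuse it because the resulting model has no free parameters to estimate.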
def get_dummy_mod(fit=True, pandas=False):
# This tests time-varying parameters regression when in fact the parameters
# are not time-varying, and in fact the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
if pandas:
index = pd.date_range('1960-01-01', periods=100, freq='MS')
endog = pd.Series(endog, index=index)
exog = pd.Series(exog, index=index)
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
if fit:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
else:
res = None
return mod, res
def test_wrapping():
# Test the wrapping of various Representation / KalmanFilter /
# KalmanSmoother methods / attributes
mod, _ = get_dummy_mod(fit=False)
# Test that we can get the design matrix
assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
# Test that we can set individual elements of the design matrix
mod['design', 0, 0, :] = 2
assert_equal(mod.ssm['design', 0, 0, :], 2)
assert_equal(mod.ssm['design'].shape, (1, 1, 100))
# Test that we can set the entire design matrix
mod['design'] = [[3.]]
assert_equal(mod.ssm['design', 0, 0], 3.)
# (Now it's no longer time-varying, so only 2-dim)
assert_equal(mod.ssm['design'].shape, (1, 1))
# Test that we can change the following properties: loglikelihood_burn,
# initial_variance, tolerance
assert_equal(mod.loglikelihood_burn, 1)
mod.loglikelihood_burn = 0
assert_equal(mod.ssm.loglikelihood_burn, 0)
assert_equal(mod.tolerance, mod.ssm.tolerance)
mod.tolerance = 0.123
assert_equal(mod.ssm.tolerance, 0.123)
assert_equal(mod.initial_variance, 1e10)
mod.initial_variance = 1e12
assert_equal(mod.ssm.initial_variance, 1e12)
# Test that we can use the following wrappers: initialization,
# initialize_known, initialize_stationary, initialize_approximate_diffuse
# Initialization starts off as none
assert_equal(mod.initialization, None)
# Since the SARIMAX model may be fully stationary or may have diffuse
# elements, it uses a custom initialization by default, but it can be
# overridden by users
mod.initialize_state()
# (The default initialization in this case is known because there is a non-
# stationary state corresponding to the time-varying regression parameter)
assert_equal(mod.initialization, 'known')
mod.initialize_approximate_diffuse(1e5)
assert_equal(mod.initialization, 'approximate_diffuse')
assert_equal(mod.ssm._initial_variance, 1e5)
mod.initialize_known([5.], [[40]])
assert_equal(mod.initialization, 'known')
assert_equal(mod.ssm._initial_state, [5.])
assert_equal(mod.ssm._initial_state_cov, [[40]])
mod.initialize_stationary()
assert_equal(mod.initialization, 'stationary')
# Test that we can use the following wrapper methods: set_filter_method,
# set_stability_method, set_conserve_memory, set_smoother_output
# The defaults are as follows:
assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
# Now, create the Cython filter object and assert that they have
# transferred correctly
mod.ssm._initialize_filter()
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# (the smoother object is so far not in Cython, so there is no
# transferring)
# Change the attributes in the model class
if compatibility_mode:
assert_raises(NotImplementedError, mod.set_filter_method, 100)
else:
mod.set_filter_method(100)
mod.set_stability_method(101)
mod.set_conserve_memory(102)
mod.set_smoother_output(103)
# Assert that the changes have occurred in the ssm class
if not compatibility_mode:
assert_equal(mod.ssm.filter_method, 100)
assert_equal(mod.ssm.stability_method, 101)
assert_equal(mod.ssm.conserve_memory, 102)
assert_equal(mod.ssm.smoother_output, 103)
# Assert that the changes have *not yet* occurred in the filter object
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# Re-initialize the filter object (this would happen automatically anytime
# loglike, filter, etc. were called)
# In this case, an error will be raised since filter_method=100 is not
# valid
# Note: this error is only raised in the compatibility case, since the
# newer filter logic checks for a valid filter mode at a different point
if compatibility_mode:
assert_raises(NotImplementedError, mod.ssm._initialize_filter)
# Now, test the setting of the other two methods by resetting the
# filter method to a valid value
mod.set_filter_method(1)
mod.ssm._initialize_filter()
# Retrieve the new kalman filter object (a new object had to be created
# due to the changing filter method)
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, 1)
assert_equal(kf.stability_method, 101)
assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
true = results_sarimax.wpi1_stationary
endog = np.diff(true['data'])[1:]
mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
# Test optim_hessian={'opg','oim','approx'}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
# Check that the Hessians broadly result in the same optimum
assert_allclose(res1.llf, res2.llf, rtol=1e-2)
# Test return_params=True
mod, _ = get_dummy_mod(fit=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res_params = mod.fit(disp=-1, return_params=True)
# 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
assert_almost_equal(res_params, [0, 0], 5)
def test_score_misc():
mod, res = get_dummy_mod()
# Test that the score function works
mod.score(res.params)
def test_from_formula():
assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
def test_score_analytic_ar1():
# Test the score against the analytic score for an AR(1) model with 2
# observations
# Let endog = [1, 0.5], params=[0, 1]
mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
def partial_phi(phi, sigma2):
return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
def partial_sigma2(phi, sigma2):
return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2)
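    # For reference (not spelled out in the original test): with endog = [1, 0.5] the exact
    # Gaussian AR(1) log-likelihood is
    #   l(phi, sigma2) = -log(2*pi) - log(sigma2) + 0.5*log(1 - phi**2)
    #                    - ((1 - phi**2) * 1**2 + (0.5 - phi * 1)**2) / (2 * sigma2)
    # and partial_phi / partial_sigma2 above are its partial derivatives.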
params = np.r_[0., 2]
# Compute the analytic score
analytic_score = np.r_[
partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])]
# Check each of the approximations, transformed parameters
approx_cs = mod.score(params, transformed=True, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(params, transformed=True, approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(params, transformed=True, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(params, transformed=True, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the approximations for untransformed parameters. The analytic
# check now comes from chain rule with the analytic derivative of the
# transformation
# if L* is the likelihood evaluated at untransformed parameters and
# L is the likelihood evaluated at transformed parameters, then we have:
# L*(u) = L(t(u))
# and then
# L'*(u) = L'(t(u)) * t'(u)
def partial_transform_phi(phi):
return -1. / (1 + phi**2)**(3./2)
def partial_transform_sigma2(sigma2):
return 2. * sigma2
uparams = mod.untransform_params(params)
analytic_score = np.dot(
np.diag(np.r_[partial_transform_phi(uparams[0]),
partial_transform_sigma2(uparams[1])]),
np.r_[partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])])
approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(uparams, transformed=False,
approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(uparams, transformed=False, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the Hessian: these approximations are not very good, particularly
# when phi is close to 0
params = np.r_[0.5, 1.]
def hessian(phi, sigma2):
hessian = np.zeros((2,2))
hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
return hessian
analytic_hessian = hessian(params[0], params[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(mod._hessian_complex_step(params) * 2,
analytic_hessian, atol=1e-1)
assert_allclose(mod._hessian_finite_difference(params) * 2,
analytic_hessian, atol=1e-1)
def test_cov_params():
mod, res = get_dummy_mod()
# Smoke test for each of the covariance types
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(res.params, disp=-1, cov_type='none')
assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
res = mod.fit(res.params, disp=-1, cov_type='approx')
assert_equal(res.cov_type, 'approx')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
res = mod.fit(res.params, disp=-1, cov_type='oim')
assert_equal(res.cov_type, 'oim')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='opg')
assert_equal(res.cov_type, 'opg')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
res = mod.fit(res.params, disp=-1, cov_type='robust')
assert_equal(res.cov_type, 'robust')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
assert_equal(res.cov_type, 'robust_oim')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
assert_equal(res.cov_type, 'robust_approx')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
# The transforms in MLEModel are noops
mod = MLEModel([1,2], **kwargs)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [2, 3])
assert_allclose(mod.untransform_params([2, 3]), [2, 3])
# Smoke test for transformation in `filter`, `update`, `loglike`,
# `loglikeobs`
mod.filter([], transformed=False)
mod.update([], transformed=False)
mod.loglike([], transformed=False)
mod.loglikeobs([], transformed=False)
# Note that mod is an SARIMAX instance, and the two parameters are
# variances
mod, _ = get_dummy_mod(fit=False)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [4, 9])
assert_allclose(mod.untransform_params([4, 9]), [2, 3])
# Test transformation in `filter`
res = mod.filter([2, 3], transformed=True)
assert_allclose(res.params, [2, 3])
res = mod.filter([2, 3], transformed=False)
assert_allclose(res.params, [4, 9])
def test_filter():
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
# Test return of ssm object
res = mod.filter([], return_ssm=True)
assert_equal(isinstance(res, kalman_filter.FilterResults), True)
# Test return of full results object
res = mod.filter([])
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'opg')
# Test return of full results object, specific covariance type
res = mod.filter([], cov_type='oim')
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'oim')
def test_params():
mod = MLEModel([1,2], **kwargs)
# By default start_params raises NotImplementedError
assert_raises(NotImplementedError, lambda: mod.start_params)
# But param names are by default an empty array
assert_equal(mod.param_names, [])
# We can set them in the object if we want
mod._start_params = [1]
mod._param_names = ['a']
assert_equal(mod.start_params, [1])
assert_equal(mod.param_names, ['a'])
def check_results(pandas):
mod, res = get_dummy_mod(pandas=pandas)
# Test fitted values
assert_almost_equal(res.fittedvalues[2:], mod.endog[2:].squeeze())
# Test residuals
assert_almost_equal(res.resid[2:], np.zeros(mod.nobs-2))
# Test loglikelihood_burn
assert_equal(res.loglikelihood_burn, 1)
def test_results(pandas=False):
check_results(pandas=False)
check_results(pandas=True)
def test_predict():
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1,2], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Test that predict with start=None, end=None does prediction with full
# dataset
predict = res.predict()
assert_equal(predict.shape, (mod.nobs,))
assert_allclose(res.get_prediction().predicted_mean, predict)
# Test a string value to the dynamic option
assert_allclose(res.predict(dynamic='1981-01-01'), res.predict())
# Test an invalid date string value to the dynamic option
# assert_raises(ValueError, res.predict, dynamic='1982-01-01')
# Test for passing a string to predict when dates are not set
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
assert_raises(KeyError, res.predict, dynamic='string')
def test_forecast():
# Numpy
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
forecast = res.forecast(steps=10)
assert_allclose(forecast, np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, forecast)
# Pandas
index = pd.date_range('1960-01-01', periods=2, freq='MS')
mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
res = mod.filter([])
assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
def test_summary():
dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
endog = pd.Series([1,2,3,4,5], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Get the summary
txt = str(res.summary())
# Test res.summary when the model has dates
    assert_equal(re.search(r'Sample:\s+01-01-1980', txt) is not None, True)
    assert_equal(re.search(r'\s+- 01-01-1984', txt) is not None, True)
    # Test res.summary when `model_name` was not provided
    assert_equal(re.search(r'Model:\s+MLEModel', txt) is not None, True)
# Smoke test that summary still works when diagnostic tests fail
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res.filter_results._standardized_forecasts_error[:] = np.nan
res.summary()
res.filter_results._standardized_forecasts_error = 1
res.summary()
res.filter_results._standardized_forecasts_error = 'a'
res.summary()
def check_endog(endog, nobs=2, k_endog=1, **kwargs):
# create the model
mod = MLEModel(endog, **kwargs)
# the data directly available in the model is the Statsmodels version of
# the data; it should be 2-dim, C-contiguous, long-shaped:
# (nobs, k_endog) == (2, 1)
assert_equal(mod.endog.ndim, 2)
assert_equal(mod.endog.flags['C_CONTIGUOUS'], True)
assert_equal(mod.endog.shape, (nobs, k_endog))
# the data in the `ssm` object is the state space version of the data; it
# should be 2-dim, F-contiguous, wide-shaped (k_endog, nobs) == (1, 2)
# and it should share data with mod.endog
assert_equal(mod.ssm.endog.ndim, 2)
assert_equal(mod.ssm.endog.flags['F_CONTIGUOUS'], True)
assert_equal(mod.ssm.endog.shape, (k_endog, nobs))
assert_equal(mod.ssm.endog.base is mod.endog, True)
return mod
def test_basic_endog():
# Test various types of basic python endog inputs (e.g. lists, scalars...)
# Check cannot call with non-array-like
# fails due to checks in Statsmodels base classes
assert_raises(ValueError, MLEModel, endog=1, k_states=1)
assert_raises(ValueError, MLEModel, endog='a', k_states=1)
assert_raises(ValueError, MLEModel, endog=True, k_states=1)
# Check behavior with different types
mod = MLEModel([1], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([1.], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([True], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel(['a'], **kwargs)
# raises error due to inability coerce string to numeric
assert_raises(ValueError, mod.filter, [])
    # Check that different iterable types give the expected result
endog = [1.,2.]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = [[1.],[2.]]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = (1.,2.)
mod = check_endog(endog, **kwargs)
mod.filter([])
def test_numpy_endog():
# Test various types of numpy endog inputs
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.base is not endog, True)
endog[0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_equal(mod.data.orig_endog, endog)
# Check behavior with different memory layouts / shapes
# Example (failure): 0-dim array
endog = np.array(1.)
# raises error due to len(endog) failing in Statsmodels base classes
assert_raises(TypeError, check_endog, endog, **kwargs)
# Example : 1-dim array, both C- and F-contiguous, length 2
endog = np.array([1.,2.])
assert_equal(endog.ndim, 1)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2,))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, long-shaped: (nobs, k_endog)
endog = np.array([1., 2.]).reshape(2, 1)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, wide-shaped: (k_endog, nobs)
endog = np.array([1., 2.]).reshape(1, 2)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : 2-dim array, F-contiguous, long-shaped (nobs, k_endog)
endog = np.array([1., 2.]).reshape(1, 2).transpose()
assert_equal(endog.ndim, 2)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, F-contiguous, wide-shaped (k_endog, nobs)
endog = np.array([1., 2.]).reshape(2, 1).transpose()
assert_equal(endog.ndim, 2)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example (failure): 3-dim array
endog = np.array([1., 2.]).reshape(2, 1, 1)
# raises error due to direct ndim check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : np.array with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = np.array([[1., 2.], [3., 4.]])
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_pandas_endog():
# Test various types of pandas endog inputs (e.g. TimeSeries, etc.)
# Example (failure): pandas.Series, no dates
endog = pd.Series([1., 2.])
# raises error due to no dates
warnings.simplefilter('always')
# assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.Series
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.Series, string datatype
endog = pd.Series(['a'], index=dates)
# raises error due to direct type casting check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.Series
endog = pd.Series([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.DataFrame with 1 column
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example (failure): pandas.DataFrame with 2 columns
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
# raises error because 2-columns means k_endog=2, but the design matrix
# set in **kwargs is shaped (1,1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.values.base is not endog, True)
endog.iloc[0, 0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_allclose(mod.data.orig_endog, endog)
# Example : pandas.DataFrame with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_diagnostics():
mod, res = get_dummy_mod()
# Override the standardized forecasts errors to get more reasonable values
# for the tests to run (not necessary, but prevents some annoying warnings)
shape = res.filter_results._standardized_forecasts_error.shape
res.filter_results._standardized_forecasts_error = (
np.random.normal(size=shape))
# Make sure method=None selects the appropriate test
actual = res.test_normality(method=None)
desired = res.test_normality(method='jarquebera')
assert_allclose(actual, desired)
assert_raises(NotImplementedError, res.test_normality, method='invalid')
actual = res.test_heteroskedasticity(method=None)
desired = res.test_heteroskedasticity(method='breakvar')
assert_allclose(actual, desired)
assert_raises(ValueError, res.test_heteroskedasticity, method=None, alternative='invalid')
assert_raises(NotImplementedError, res.test_heteroskedasticity, method='invalid')
actual = res.test_serial_correlation(method=None)
desired = res.test_serial_correlation(method='ljungbox')
assert_allclose(actual, desired)
assert_raises(NotImplementedError, res.test_serial_correlation, method='invalid')
# Smoke tests for other options
actual = res.test_heteroskedasticity(method=None, alternative='d', use_f=False)
desired = res.test_serial_correlation(method='boxpierce')
def test_diagnostics_nile_eviews():
# Test the diagnostic tests using the Nile dataset. Results are from
# "Fitting State Space Models with EViews" (Van den Bossche 2011,
# Journal of Statistical Software).
# For parameter values, see Figure 2
# For Ljung-Box and Jarque-Bera statistics and p-values, see Figure 5
# The Heteroskedasticity statistic is not provided in this paper.
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = np.exp(9.600350)
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = np.exp(7.348705)
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=10)[0, :, -1]
assert_allclose(actual, [13.117, 0.217], atol=1e-3)
# Test Jarque-Bera
actual = res.test_normality(method='jarquebera')[0, :2]
assert_allclose(actual, [0.041686, 0.979373], atol=1e-5)
def test_diagnostics_nile_durbinkoopman():
# Test the diagnostic tests using the Nile dataset. Results are from
# Durbin and Koopman (2012); parameter values reported on page 37; test
# statistics on page 40
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = 15099.
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = 1469.1
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=9)[0, 0, -1]
assert_allclose(actual, [8.84], atol=1e-2)
# Test Jarque-Bera
# Note: The book reports 0.09 for Kurtosis, because it is reporting the
# statistic less the mean of the Kurtosis distribution (which is 3).
norm = res.test_normality(method='jarquebera')[0]
actual = [norm[0], norm[2], norm[3]]
assert_allclose(actual, [0.05, -0.03, 3.09], atol=1e-2)
# Test Heteroskedasticity
# Note: only 2 digits provided in the book
actual = res.test_heteroskedasticity(method='breakvar')[0, 0]
assert_allclose(actual, [0.61], atol=1e-2)
def test_prediction_results():
# Just smoke tests for the PredictionResults class, which is copied from
# elsewhere in Statsmodels
mod, res = get_dummy_mod()
predict = res.get_prediction()
summary_frame = predict.summary_frame()
def test_lutkepohl_information_criteria():
# Setup dataset, use Lutkepohl data
dta = pd.DataFrame(
results_var_misc.lutkepohl_data, columns=['inv', 'inc', 'consump'],
index=pd.date_range('1960-01-01', '1982-10-01', freq='QS'))
dta['dln_inv'] = np.log(dta['inv']).diff()
dta['dln_inc'] = np.log(dta['inc']).diff()
dta['dln_consump'] = np.log(dta['consump']).diff()
endog = dta.loc['1960-04-01':'1978-10-01',
['dln_inv', 'dln_inc', 'dln_consump']]
# AR model - SARIMAX
# (use loglikelihood_burn=1 to mimic conditional MLE used by Stata's var
# command).
true = results_var_misc.lutkepohl_ar1_lustats
mod = sarimax.SARIMAX(endog['dln_inv'], order=(1, 0, 0), trend='c',
loglikelihood_burn=1)
res = mod.filter(true['params'])
assert_allclose(res.llf, true['loglike'])
# Test the Lutkepohl ICs
# Note: for the Lutkepohl ICs, Stata only counts the AR coefficients as
# estimated parameters for the purposes of information criteria, whereas we
# count all parameters including scale and constant, so we need to adjust
# for that
aic = (res.info_criteria('aic', method='lutkepohl') -
2 * 2 / res.nobs_effective)
bic = (res.info_criteria('bic', method='lutkepohl') -
2 * np.log(res.nobs_effective) / res.nobs_effective)
hqic = (res.info_criteria('hqic', method='lutkepohl') -
2 * 2 * np.log(np.log(res.nobs_effective)) / res.nobs_effective)
assert_allclose(aic, true['aic'])
assert_allclose(bic, true['bic'])
assert_allclose(hqic, true['hqic'])
# Test the non-Lutkepohl ICs
# Note: for the non-Lutkepohl ICs, Stata does not count the scale as an
# estimated parameter, but does count the constant term, for the
# purposes of information criteria, whereas we count both, so we need to
# adjust for that
true = results_var_misc.lutkepohl_ar1
aic = res.aic - 2
bic = res.bic - np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
aic = res.info_criteria('aic') - 2
bic = res.info_criteria('bic') - np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
# Note: could also test the "dfk" (degree of freedom corrections), but not
# really necessary since they just rescale things a bit
# VAR model - VARMAX
# (use loglikelihood_burn=1 to mimic conditional MLE used by Stata's var
# command).
true = results_var_misc.lutkepohl_var1_lustats
mod = varmax.VARMAX(endog, order=(1, 0), trend='nc',
error_cov_type='unstructured', loglikelihood_burn=1,)
res = mod.filter(true['params'])
assert_allclose(res.llf, true['loglike'])
# Test the Lutkepohl ICs
# Note: for the Lutkepohl ICs, Stata only counts the AR coefficients as
# estimated parameters for the purposes of information criteria, whereas we
# count all parameters including the elements of the covariance matrix, so
# we need to adjust for that
aic = (res.info_criteria('aic', method='lutkepohl') -
2 * 6 / res.nobs_effective)
bic = (res.info_criteria('bic', method='lutkepohl') -
6 * np.log(res.nobs_effective) / res.nobs_effective)
hqic = (res.info_criteria('hqic', method='lutkepohl') -
2 * 6 * np.log(np.log(res.nobs_effective)) / res.nobs_effective)
assert_allclose(aic, true['aic'])
assert_allclose(bic, true['bic'])
assert_allclose(hqic, true['hqic'])
# Test the non-Lutkepohl ICs
# Note: for the non-Lutkepohl ICs, Stata does not count the elements of the
# covariance matrix as estimated parameters for the purposes of information
# criteria, whereas we count both, so we need to adjust for that
true = results_var_misc.lutkepohl_var1
aic = res.aic - 2 * 6
bic = res.bic - 6 * np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
aic = res.info_criteria('aic') - 2 * 6
bic = res.info_criteria('bic') - 6 * np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
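# Hedged illustration of the adjustments used above (not part of the original
# test): if statsmodels counts k_sm parameters while Stata's version of an IC
# counts k_stata, the corrections subtracted from the statsmodels values are
#   AIC : 2 * (k_sm - k_stata) / T
#   BIC : (k_sm - k_stata) * log(T) / T
#   HQIC: 2 * (k_sm - k_stata) * log(log(T)) / T
# with T = res.nobs_effective; e.g. k_sm - k_stata = 2 in the AR(1) case
# (constant and scale) and 6 in the VAR(1) case (covariance elements).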
|
py | 7dfbad3d45b0a9e8ddf2e14c6ffabe3c03a03261 |
from ..get_sparql_dataframe import get_sparql_dataframe
def get_countries():
query = """
SELECT
?country ?countryLabel ?population ?countryCode2 ?countryCode3
(AVG(?lat) AS ?latitude) (AVG(?long) AS ?longitude)
WHERE
{
?country wdt:P31 wd:Q6256 ;
wdt:P1082 ?population ;
wdt:P297 ?countryCode2 ;
wdt:P298 ?countryCode3 ;
p:P625 [
psv:P625 [
wikibase:geoLatitude ?lat ;
wikibase:geoLongitude ?long ;
]
]
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
GROUP BY ?country ?countryLabel ?population ?countryCode2 ?countryCode3
"""
result = get_sparql_dataframe(service='https://query.wikidata.org/sparql', query=query)
result.columns = ['id', 'country', 'population', 'code2', 'code3', 'latitude', 'longitude']
return result
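# Hedged usage sketch (requires network access to query.wikidata.org; the
# helper below is illustrative and not part of the original module).
def example_top_countries(n=10):
    """Return the n most populous countries from get_countries()."""
    import pandas as pd  # local import: the rest of the module does not need it
    countries = get_countries()
    countries['population'] = pd.to_numeric(countries['population'])
    return countries.sort_values('population', ascending=False).head(n)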
def get_cities():
query = """
SELECT ?city ?cityLabel ?population ?country ?countryLabel
(AVG(?lat) AS ?latitude) (AVG(?long) AS ?longitude)
WHERE {
?city wdt:P31/wdt:P279* wd:Q515 ;
wdt:P1082 ?population ;
wdt:P17 ?country ;
p:P625 [
psv:P625 [
wikibase:geoLatitude ?lat;
wikibase:geoLongitude ?long;
]
]
SERVICE wikibase:label {
bd:serviceParam wikibase:language "en" .
}
}
GROUP BY ?city ?cityLabel ?population ?country ?countryLabel
"""
result = get_sparql_dataframe(service='https://query.wikidata.org/sparql', query=query)
result.columns = ['id', 'city', 'population', 'country_id', 'country', 'latitude', 'longitude']
return result |
py | 7dfbae02b4efa732a6688c583212d8a102adfc7e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.forms import widgets
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from cms.utils.i18n import get_current_language
from cms.utils.urlutils import admin_reverse
class LinkedRelatedInlineMixin(object):
"""
This InlineAdmin mixin links the first field to the row object's own admin
change form.
NOTE: If the first field is editable, the resulting behavior is undefined.
For best results, consider making all fields readonly (since they can be
edited with ease by following the link), and disabling the ability to add
new objects by overriding has_add_permission() on the inline to always
return false.
"""
extra = 0
class ReverseLink:
allow_tags = True
def __init__(self, display_link="link"):
self.display_link = display_link
self.short_description = display_link
def __call__(self, obj):
model_name = obj.__class__.__name__.lower()
admin_link = admin_reverse(
"{app_label}_{model_name}_change".format(
app_label=obj._meta.app_label.lower(),
model_name=model_name,
), args=(obj.id, ))
return '<a href="{admin_link}" title="{title}">{link}</a>'.format(
admin_link=admin_link,
title=_('Click to view or edit this {0}').format(
obj._meta.verbose_name),
link=getattr(obj, self.display_link))
def __init__(self, parent_model, admin_site):
self.original_fields = self.get_fields_list(None)
if len(self.original_fields):
self.fields = ["reverse_link", ] + self.original_fields[1:]
else:
self.fields = ["reverse_link"]
self.reverse_link = self.ReverseLink(self.original_fields[0])
super(LinkedRelatedInlineMixin, self).__init__(
parent_model, admin_site)
def get_fields_list(self, request, obj=None):
"""
Returns a list of the ModelAdmin's declared `fields`, or constructs it
from the object, then removes any `exclude`d items.
"""
# ModelAdmin.get_fields came in Django 1.7, I believe
if hasattr(super(LinkedRelatedInlineMixin, self), "get_fields"):
fields = super(
LinkedRelatedInlineMixin, self).get_fields(request, obj)
elif self.fields:
fields = self.fields
else:
fields = [f.name for f in self.model._meta.local_fields]
if fields and self.exclude:
fields = [f for f in fields if f not in self.exclude]
if fields:
return list(fields)
else:
return []
def get_readonly_fields(self, request, obj=None):
readonly_fields = super(
LinkedRelatedInlineMixin, self).get_readonly_fields(request, obj)
if "reverse_link" not in readonly_fields:
readonly_fields = list(readonly_fields) + ["reverse_link", ]
# We want all fields to be readonly for this inline
return readonly_fields
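# Hedged usage sketch (the Chapter model and inline below are hypothetical,
# not part of this module): combine the mixin with a standard inline so the
# first declared field is rendered as a link to the row's own change form.
#
#     from django.contrib import admin
#
#     class ChapterInline(LinkedRelatedInlineMixin, admin.TabularInline):
#         model = Chapter
#         fields = ["title", "page_count"]
#         readonly_fields = ["title", "page_count"]
#
#         def has_add_permission(self, request, obj=None):
#             return False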
class AllTranslationsMixin(object):
@property
def media(self):
return super(AllTranslationsMixin, self).media + widgets.Media(
css={'all': ('css/admin/all-translations-mixin.css', ), }
)
def all_translations(self, obj):
"""
Adds a property to the list_display that lists all translations with
links directly to their change forms. Includes CSS to style the links
to look like tags with color indicating current language, active and
inactive translations.
A similar capability is in HVAD, and now there is this for
Parler-based projects.
"""
available = list(obj.get_available_languages())
current = get_current_language()
langs = []
for code, lang_name in settings.LANGUAGES:
classes = ["lang-code", ]
title = force_text(lang_name)
if code == current:
classes += ["current", ]
if code in available:
classes += ["active", ]
title += " (translated)"
else:
title += " (untranslated)"
change_form_url = admin_reverse(
'{app_label}_{model_name}_change'.format(
app_label=obj._meta.app_label.lower(),
model_name=obj.__class__.__name__.lower(),
), args=(obj.id, )
)
link = '<a class="{classes}" href="{url}?language={code}" title="{title}">{code}</a>'.format(
classes=' '.join(classes),
url=change_form_url,
code=code,
title=title,
)
langs.append(link)
return ''.join(langs)
all_translations.short_description = 'Translations'
all_translations.allow_tags = True
def get_list_display(self, request):
"""
Unless the developer has already placed "all_translations" in the
list_display list (presumably specifically where she wants it), append
the list of translations to the end.
"""
list_display = super(
AllTranslationsMixin, self).get_list_display(request)
if 'all_translations' not in list_display:
list_display = list(list_display) + ['all_translations', ]
return list_display
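# Hedged usage sketch (hypothetical Parler-based admin, not part of this
# module): list_display gains an "all_translations" column automatically.
#
#     from parler.admin import TranslatableAdmin
#
#     class ArticleAdmin(AllTranslationsMixin, TranslatableAdmin):
#         list_display = ["title"]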
|
py | 7dfbae59a2425ff0c6d5f4d7092981874e9619a3 | # Generated by Django 3.0.5 on 2020-04-10 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=50)),
('title', models.CharField(max_length=120)),
('description', models.CharField(max_length=200)),
('body', models.TextField()),
('location', models.CharField(max_length=120)),
('publication_date', models.DateField()),
('active', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
py | 7dfbaf24d4e1782b01c38a94b5b5ea04c15335ac | """
Django settings for RestAdmin project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import datetime
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,BASE_DIR)
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-l7*47(+oc2xu*gx*cgxci0v0gyc(m5qe1p#hlb)@*+5z&)p(a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
# Query filter backend; note the version must not be greater than 2.0.2
'django_filters',
'system',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'RestAdmin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'RestAdmin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'system.UserProfile'
REST_FRAMEWORK = {
# Pagination
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
# Number of items displayed per page
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
# 'system.permissions.IsOwnerOrReadOnly',
# Require authentication; previously active logins become invalid
'rest_framework.permissions.IsAuthenticated',
# 'system.permissions.IsOwnerOrReadOnly',
),
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1), # can also be set with e.g. seconds=20
'JWT_AUTH_HEADER_PREFIX': 'JWT', # must match the prefix the frontend sends, e.g. "token"; here it is "JWT"
}
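# Illustrative note (not a setting): with the prefix above, clients must send
# the token as "Authorization: JWT <token>", e.g. a test client might build
#     {"Authorization": "JWT " + token}
# as its request headers.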
# Allow logging in with a mobile phone number as well
AUTHENTICATION_BACKENDS = (
'system.views.CustomBackend',
)
# ALLOWED_HOSTS= ["*"] |
py | 7dfbaf7e38c68f466f8852b4befa59977aa5ea9c | import my_module
|
py | 7dfbb0d32ce7a56dd0629c711b67005bf202f7b7 | from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class ExtractTensorPatches(nn.Module):
r"""Module that extract patches from tensors and stack them.
In the simplest case, the output value of the operator with input size
:math:`(B, C, H, W)` is :math:`(B, N, C, H_{out}, W_{out})`.
where
- :math:`B` is the batch size.
- :math:`N` denotes the total number of extracted patches stacked in
  left-right and top-bottom order.
- :math:`C` denotes the number of input channels.
- :math:`H`, :math:`W` denote the height and width of the input in pixels.
- :math:`H_{out}`, :math:`W_{out}` denote the patch size defined in the
  function signature.
* :attr:`window_size` is the size of the sliding window and controls the
shape of the output tensor and defines the shape of the output patch.
* :attr:`stride` controls the stride to apply to the sliding window and
regulates the overlapping between the extracted patches.
* :attr:`padding` controls the amount of implicit zeros-paddings on both
sizes at each dimension.
The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
be either:
- a single ``int`` -- in which case the same value is used for the
height and width dimension.
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension.
Args:
window_size: the size of the sliding window and the output patch size.
stride: stride of the sliding window.
padding: Zero-padding added to both side of the input.
Shape:
- Input: :math:`(B, C, H, W)`
- Output: :math:`(B, N, C, H_{out}, W_{out})`
Returns:
the tensor with the extracted patches.
Examples:
>>> input = torch.arange(9.).view(1, 1, 3, 3)
>>> patches = extract_tensor_patches(input, (2, 3))
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> patches[:, -1]
tensor([[[[3., 4., 5.],
[6., 7., 8.]]]])
"""
def __init__(
self,
window_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = 1,
padding: Optional[Union[int, Tuple[int, int]]] = 0,
) -> None:
super().__init__()
self.window_size: Tuple[int, int] = _pair(window_size)
self.stride: Tuple[int, int] = _pair(stride)
self.padding: Tuple[int, int] = _pair(padding)
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return extract_tensor_patches(input, self.window_size, stride=self.stride, padding=self.padding)
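# Hedged sketch of the module form (mirrors the functional example in the
# docstring): a 4x4 image with a 2x2 window and stride 2 gives four patches.
#     patcher = ExtractTensorPatches(window_size=(2, 2), stride=(2, 2))
#     patches = patcher(torch.arange(16.).view(1, 1, 4, 4))
#     patches.shape  # -> torch.Size([1, 4, 1, 2, 2])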
class CombineTensorPatches(nn.Module):
r"""Module that combine patches from tensors.
In the simplest case, the output value of the operator with input size
:math:`(B, N, C, H_{out}, W_{out})` is :math:`(B, C, H, W)`.
where
- :math:`B` is the batch size.
- :math:`N` denotes the total number of extracted patches stacked in
  left-right and top-bottom order.
- :math:`C` denotes the number of input channels.
- :math:`H`, :math:`W` denote the height and width of the input in pixels.
- :math:`H_{out}`, :math:`W_{out}` denote the patch size defined in the
  function signature.
* :attr:`window_size` is the size of the sliding window and controls the
shape of the output tensor and defines the shape of the output patch.
* :attr:`stride` controls the stride to apply to the sliding window and
regulates the overlapping between the extracted patches.
* :attr:`padding` controls the amount of implicit zeros-paddings on both
sizes at each dimension.
The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
be either:
- a single ``int`` -- in which case the same value is used for the
height and width dimension.
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension.
Args:
patches: patched tensor.
window_size: the size of the sliding window and the output patch size.
unpadding: remove the padding added to both side of the input.
Shape:
- Input: :math:`(B, N, C, H_{out}, W_{out})`
- Output: :math:`(B, C, H, W)`
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
def __init__(
self,
window_size: Union[int, Tuple[int, int]],
unpadding: Union[int, Tuple[int, int]] = 0,
) -> None:
super().__init__()
self.window_size: Tuple[int, int] = _pair(window_size)
pad: Tuple[int, int] = _pair(unpadding)
self.unpadding: Tuple[int, int, int, int] = (pad[0], pad[0], pad[1], pad[1])
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return combine_tensor_patches(input, self.window_size, stride=self.window_size, unpadding=self.unpadding)
def combine_tensor_patches(
patches: torch.Tensor,
window_size: Tuple[int, int] = (4, 4),
stride: Tuple[int, int] = (4, 4),
unpadding: Optional[Tuple[int, int, int, int]] = None,
) -> torch.Tensor:
r"""Restore input from patches.
Args:
patches: patched tensor with shape :math:`(B, N, C, H_{out}, W_{out})`.
window_size: the size of the sliding window and the output patch size.
stride: stride of the sliding window.
unpadding: remove the padding added to both side of the input.
Return:
The combined patches in an image tensor with shape :math:`(B, C, H, W)`.
Example:
>>> out = extract_tensor_patches(torch.arange(16).view(1, 1, 4, 4), window_size=(2, 2), stride=(2, 2))
>>> combine_tensor_patches(out, window_size=(2, 2), stride=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
if stride[0] != window_size[0] or stride[1] != window_size[1]:
raise NotImplementedError(
f"Only stride == window_size is supported. Got {stride} and {window_size}."
"Please feel free to drop a PR to Kornia Github."
)
if unpadding is not None:
window_size = (
window_size[0] + (unpadding[0] + unpadding[1]) // window_size[0],
window_size[1] + (unpadding[2] + unpadding[3]) // window_size[1]
)
patches_tensor = patches.view(-1, window_size[0], window_size[1], *patches.shape[-3:])
restored_tensor = torch.cat(torch.chunk(patches_tensor, window_size[0], dim=1), -2).squeeze(1)
restored_tensor = torch.cat(torch.chunk(restored_tensor, window_size[1], dim=1), -1).squeeze(1)
if unpadding is not None:
restored_tensor = torch.nn.functional.pad(restored_tensor, [-i for i in unpadding])
return restored_tensor
def _extract_tensor_patchesnd(
input: torch.Tensor, window_sizes: Tuple[int, ...], strides: Tuple[int, ...]
) -> torch.Tensor:
batch_size, num_channels = input.size()[:2]
dims = range(2, input.dim())
for dim, patch_size, stride in zip(dims, window_sizes, strides):
input = input.unfold(dim, patch_size, stride)
input = input.permute(0, *dims, 1, *(dim + len(dims) for dim in dims)).contiguous()
return input.view(batch_size, -1, num_channels, *window_sizes)
def extract_tensor_patches(
input: torch.Tensor,
window_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
) -> torch.Tensor:
r"""Function that extract patches from tensors and stack them.
See :class:`~kornia.contrib.ExtractTensorPatches` for details.
Args:
input: tensor image where to extract the patches with shape :math:`(B, C, H, W)`.
window_size: the size of the sliding window and the output patch size.
stride: stride of the sliding window.
padding: Zero-padding added to both side of the input.
Returns:
the tensor with the extracted patches with shape :math:`(B, N, C, H_{out}, W_{out})`.
Examples:
>>> input = torch.arange(9.).view(1, 1, 3, 3)
>>> patches = extract_tensor_patches(input, (2, 3))
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> patches[:, -1]
tensor([[[[3., 4., 5.],
[6., 7., 8.]]]])
"""
if not torch.is_tensor(input):
raise TypeError(f"Input input type is not a torch.Tensor. Got {type(input)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if padding:
pad_vert, pad_horz = _pair(padding)
input = F.pad(input, [pad_horz, pad_horz, pad_vert, pad_vert])
return _extract_tensor_patchesnd(input, _pair(window_size), _pair(stride))
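# Hedged example of the padding branch above: padding=1 zero-pads a 3x3 input
# to 5x5 before unfolding, so a (2, 3) window with stride 1 yields 4 * 3 = 12
# patches, i.e. an output of shape (1, 12, 1, 2, 3).
#     extract_tensor_patches(torch.arange(9.).view(1, 1, 3, 3), (2, 3), stride=1, padding=1)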
|
py | 7dfbb0d5fa66100050c6c006179f7d3840b2aca3 |
import os, re
f = open(os.path.join(os.path.dirname(__file__), '../input/9/part1.txt'), 'r')
def findGroups(stream):
groups = []
startIndex = stream.find('{')
if startIndex == -1:
return groups
endIndex = startIndex + 1
groupMatchCount = 1
while groupMatchCount > 0 and endIndex < len(stream):
if stream[endIndex] == '{':
groupMatchCount += 1
endIndex += 1
elif stream[endIndex] == '}':
groupMatchCount -= 1
if groupMatchCount != 0:
endIndex += 1
else:
endIndex += 1
if groupMatchCount != 0:
return groups
groups.append(stream[startIndex + 1:endIndex])
otherGroups = []
if endIndex < len(stream) - 1:
otherGroups = findGroups(stream[endIndex:])
return groups + otherGroups
def processStream(stream, score=1):
groups = findGroups(stream)
length = len(groups)
streamScore = score * length
for group in groups:
if not group:
continue
subStreamScore = processStream(group, score + 1)
streamScore += subStreamScore
return streamScore
def main():
line = f.readline().rstrip()
line = re.sub('!.', '', line)
line = re.sub('<[^>]*>', '', line)
score = processStream(line)
print(score)
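# Illustrative self-test (hedged; not part of the original solution). Scores
# follow the puzzle rule: a group scores 1 plus one point per level of nesting.
def _score_examples():
    assert processStream('{}') == 1
    assert processStream('{{{}}}') == 6  # 1 + 2 + 3
    assert processStream('{{},{}}') == 5  # 1 + 2 + 2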
if __name__ == '__main__':
main()
|
py | 7dfbb131c1bb64475119421090fe97f100479a59 | """Manage config entries in Home Assistant."""
from __future__ import annotations
import asyncio
from collections import ChainMap
from collections.abc import Callable, Coroutine, Iterable, Mapping
from contextvars import ContextVar
import dataclasses
from enum import Enum
import functools
import logging
from types import MappingProxyType, MethodType
from typing import TYPE_CHECKING, Any, Optional, TypeVar, cast
import weakref
from . import data_entry_flow, loader
from .backports.enum import StrEnum
from .components import persistent_notification
from .const import EVENT_HOMEASSISTANT_STARTED, EVENT_HOMEASSISTANT_STOP, Platform
from .core import CALLBACK_TYPE, CoreState, Event, HomeAssistant, callback
from .exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady, HomeAssistantError
from .helpers import device_registry, entity_registry, storage
from .helpers.event import async_call_later
from .helpers.frame import report
from .helpers.typing import UNDEFINED, ConfigType, DiscoveryInfoType, UndefinedType
from .setup import async_process_deps_reqs, async_setup_component
from .util import uuid as uuid_util
from .util.decorator import Registry
if TYPE_CHECKING:
from .components.dhcp import DhcpServiceInfo
from .components.hassio import HassioServiceInfo
from .components.mqtt import MqttServiceInfo
from .components.ssdp import SsdpServiceInfo
from .components.usb import UsbServiceInfo
from .components.zeroconf import ZeroconfServiceInfo
_LOGGER = logging.getLogger(__name__)
SOURCE_DHCP = "dhcp"
SOURCE_DISCOVERY = "discovery"
SOURCE_HASSIO = "hassio"
SOURCE_HOMEKIT = "homekit"
SOURCE_IMPORT = "import"
SOURCE_INTEGRATION_DISCOVERY = "integration_discovery"
SOURCE_MQTT = "mqtt"
SOURCE_SSDP = "ssdp"
SOURCE_USB = "usb"
SOURCE_USER = "user"
SOURCE_ZEROCONF = "zeroconf"
# If a user wants to hide a discovery from the UI they can "Ignore" it. The config_entries/ignore_flow
# websocket command creates a config entry with this source and while it exists normal discoveries
# with the same unique id are ignored.
SOURCE_IGNORE = "ignore"
# This is used when a user uses the "Stop Ignoring" button in the UI (the
# config_entries/ignore_flow websocket command). It's triggered after the "ignore" config entry has
# been removed and unloaded.
SOURCE_UNIGNORE = "unignore"
# This is used to signal that re-authentication is required by the user.
SOURCE_REAUTH = "reauth"
HANDLERS: Registry[str, type[ConfigFlow]] = Registry()
STORAGE_KEY = "core.config_entries"
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = ".config_entries.json"
SAVE_DELAY = 1
_T = TypeVar("_T", bound="ConfigEntryState")
class ConfigEntryState(Enum):
"""Config entry state."""
LOADED = "loaded", True
"""The config entry has been set up successfully"""
SETUP_ERROR = "setup_error", True
"""There was an error while trying to set up this config entry"""
MIGRATION_ERROR = "migration_error", False
"""There was an error while trying to migrate the config entry to a new version"""
SETUP_RETRY = "setup_retry", True
"""The config entry was not ready to be set up yet, but might be later"""
NOT_LOADED = "not_loaded", True
"""The config entry has not been loaded"""
FAILED_UNLOAD = "failed_unload", False
"""An error occurred when trying to unload the entry"""
_recoverable: bool
def __new__(cls: type[_T], value: str, recoverable: bool) -> _T:
"""Create new ConfigEntryState."""
obj = object.__new__(cls)
obj._value_ = value
obj._recoverable = recoverable
return obj
@property
def recoverable(self) -> bool:
"""Get if the state is recoverable."""
return self._recoverable
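# Illustrative note (not part of the original module): __new__ above unpacks
# the (value, recoverable) tuples, so members expose both pieces, e.g.
#     ConfigEntryState.SETUP_RETRY.value          # "setup_retry"
#     ConfigEntryState.SETUP_RETRY.recoverable    # True
#     ConfigEntryState.FAILED_UNLOAD.recoverable  # False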
DEFAULT_DISCOVERY_UNIQUE_ID = "default_discovery_unique_id"
DISCOVERY_NOTIFICATION_ID = "config_entry_discovery"
DISCOVERY_SOURCES = (
SOURCE_DHCP,
SOURCE_DISCOVERY,
SOURCE_HOMEKIT,
SOURCE_IMPORT,
SOURCE_INTEGRATION_DISCOVERY,
SOURCE_MQTT,
SOURCE_SSDP,
SOURCE_UNIGNORE,
SOURCE_USB,
SOURCE_ZEROCONF,
)
RECONFIGURE_NOTIFICATION_ID = "config_entry_reconfigure"
EVENT_FLOW_DISCOVERED = "config_entry_discovered"
class ConfigEntryDisabler(StrEnum):
"""What disabled a config entry."""
USER = "user"
# DISABLED_* is deprecated, to be removed in 2022.3
DISABLED_USER = ConfigEntryDisabler.USER.value
RELOAD_AFTER_UPDATE_DELAY = 30
# Deprecated: Connection classes
# These aren't used anymore since 2021.6.0
# Mainly here not to break custom integrations.
CONN_CLASS_CLOUD_PUSH = "cloud_push"
CONN_CLASS_CLOUD_POLL = "cloud_poll"
CONN_CLASS_LOCAL_PUSH = "local_push"
CONN_CLASS_LOCAL_POLL = "local_poll"
CONN_CLASS_ASSUMED = "assumed"
CONN_CLASS_UNKNOWN = "unknown"
class ConfigError(HomeAssistantError):
"""Error while configuring an account."""
class UnknownEntry(ConfigError):
"""Unknown entry specified."""
class OperationNotAllowed(ConfigError):
"""Raised when a config entry operation is not allowed."""
UpdateListenerType = Callable[[HomeAssistant, "ConfigEntry"], Coroutine[Any, Any, None]]
class ConfigEntry:
"""Hold a configuration entry."""
__slots__ = (
"entry_id",
"version",
"domain",
"title",
"data",
"options",
"unique_id",
"supports_unload",
"supports_remove_device",
"pref_disable_new_entities",
"pref_disable_polling",
"source",
"state",
"disabled_by",
"_setup_lock",
"update_listeners",
"reason",
"_async_cancel_retry_setup",
"_on_unload",
)
def __init__(
self,
version: int,
domain: str,
title: str,
data: Mapping[str, Any],
source: str,
pref_disable_new_entities: bool | None = None,
pref_disable_polling: bool | None = None,
options: Mapping[str, Any] | None = None,
unique_id: str | None = None,
entry_id: str | None = None,
state: ConfigEntryState = ConfigEntryState.NOT_LOADED,
disabled_by: ConfigEntryDisabler | None = None,
) -> None:
"""Initialize a config entry."""
# Unique id of the config entry
self.entry_id = entry_id or uuid_util.random_uuid_hex()
# Version of the configuration.
self.version = version
# Domain the configuration belongs to
self.domain = domain
# Title of the configuration
self.title = title
# Config data
self.data = MappingProxyType(data)
# Entry options
self.options = MappingProxyType(options or {})
# Entry system options
if pref_disable_new_entities is None:
pref_disable_new_entities = False
self.pref_disable_new_entities = pref_disable_new_entities
if pref_disable_polling is None:
pref_disable_polling = False
self.pref_disable_polling = pref_disable_polling
# Source of the configuration (user, discovery, cloud)
self.source = source
# State of the entry (LOADED, NOT_LOADED)
self.state = state
# Unique ID of this entry.
self.unique_id = unique_id
# Config entry is disabled
if isinstance(disabled_by, str) and not isinstance(
disabled_by, ConfigEntryDisabler
):
report( # type: ignore[unreachable]
"uses str for config entry disabled_by. This is deprecated and will "
"stop working in Home Assistant 2022.3, it should be updated to use "
"ConfigEntryDisabler instead",
error_if_core=False,
)
disabled_by = ConfigEntryDisabler(disabled_by)
self.disabled_by = disabled_by
# Supports unload
self.supports_unload = False
# Supports remove device
self.supports_remove_device = False
# Listeners to call on update
self.update_listeners: list[
weakref.ReferenceType[UpdateListenerType] | weakref.WeakMethod
] = []
# Reason why config entry is in a failed state
self.reason: str | None = None
# Function to cancel a scheduled retry
self._async_cancel_retry_setup: Callable[[], Any] | None = None
# Hold list for functions to call on unload.
self._on_unload: list[CALLBACK_TYPE] | None = None
async def async_setup(
self,
hass: HomeAssistant,
*,
integration: loader.Integration | None = None,
tries: int = 0,
) -> None:
"""Set up an entry."""
current_entry.set(self)
if self.source == SOURCE_IGNORE or self.disabled_by:
return
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
self.supports_unload = await support_entry_unload(hass, self.domain)
self.supports_remove_device = await support_remove_from_device(
hass, self.domain
)
try:
component = integration.get_component()
except ImportError as err:
_LOGGER.error(
"Error importing integration %s to set up %s configuration entry: %s",
integration.domain,
self.domain,
err,
)
if self.domain == integration.domain:
self.state = ConfigEntryState.SETUP_ERROR
self.reason = "Import error"
return
if self.domain == integration.domain:
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error importing platform config_flow from integration %s to set up %s configuration entry: %s",
integration.domain,
self.domain,
err,
)
self.state = ConfigEntryState.SETUP_ERROR
self.reason = "Import error"
return
# Perform migration
if not await self.async_migrate(hass):
self.state = ConfigEntryState.MIGRATION_ERROR
self.reason = None
return
error_reason = None
try:
result = await component.async_setup_entry(hass, self)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_setup_entry did not return boolean", integration.domain
)
result = False
except ConfigEntryAuthFailed as ex:
message = str(ex)
auth_base_message = "could not authenticate"
error_reason = message or auth_base_message
auth_message = (
f"{auth_base_message}: {message}" if message else auth_base_message
)
_LOGGER.warning(
"Config entry '%s' for %s integration %s",
self.title,
self.domain,
auth_message,
)
self._async_process_on_unload()
self.async_start_reauth(hass)
result = False
except ConfigEntryNotReady as ex:
self.state = ConfigEntryState.SETUP_RETRY
self.reason = str(ex) or None
wait_time = 2 ** min(tries, 4) * 5
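# Backoff schedule: 5, 10, 20, 40 seconds, then capped at 80 seconds per retry.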
tries += 1
message = str(ex)
ready_message = f"ready yet: {message}" if message else "ready yet"
if tries == 1:
_LOGGER.warning(
"Config entry '%s' for %s integration not %s; Retrying in background",
self.title,
self.domain,
ready_message,
)
else:
_LOGGER.debug(
"Config entry '%s' for %s integration not %s; Retrying in %d seconds",
self.title,
self.domain,
ready_message,
wait_time,
)
async def setup_again(*_: Any) -> None:
"""Run setup again."""
self._async_cancel_retry_setup = None
await self.async_setup(hass, integration=integration, tries=tries)
if hass.state == CoreState.running:
self._async_cancel_retry_setup = async_call_later(
hass, wait_time, setup_again
)
else:
self._async_cancel_retry_setup = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, setup_again
)
self._async_process_on_unload()
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
result = False
# Only store setup result as state if it was not forwarded.
if self.domain != integration.domain:
return
if result:
self.state = ConfigEntryState.LOADED
self.reason = None
else:
self.state = ConfigEntryState.SETUP_ERROR
self.reason = error_reason
async def async_shutdown(self) -> None:
"""Call when Home Assistant is stopping."""
self.async_cancel_retry_setup()
@callback
def async_cancel_retry_setup(self) -> None:
"""Cancel retry setup."""
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
async def async_unload(
self, hass: HomeAssistant, *, integration: loader.Integration | None = None
) -> bool:
"""Unload an entry.
Returns if unload is possible and was successful.
"""
if self.source == SOURCE_IGNORE:
self.state = ConfigEntryState.NOT_LOADED
self.reason = None
return True
if self.state == ConfigEntryState.NOT_LOADED:
return True
if integration is None:
try:
integration = await loader.async_get_integration(hass, self.domain)
except loader.IntegrationNotFound:
# The integration was likely a custom_component
# that was uninstalled, or an integration
# that has been renamed without removing the config
# entry.
self.state = ConfigEntryState.NOT_LOADED
self.reason = None
return True
component = integration.get_component()
if integration.domain == self.domain:
if not self.state.recoverable:
return False
if self.state is not ConfigEntryState.LOADED:
self.async_cancel_retry_setup()
self.state = ConfigEntryState.NOT_LOADED
self.reason = None
return True
supports_unload = hasattr(component, "async_unload_entry")
if not supports_unload:
if integration.domain == self.domain:
self.state = ConfigEntryState.FAILED_UNLOAD
self.reason = "Unload not supported"
return False
try:
result = await component.async_unload_entry(hass, self)
assert isinstance(result, bool)
# Only adjust state if we unloaded the component
if result and integration.domain == self.domain:
self.state = ConfigEntryState.NOT_LOADED
self.reason = None
self._async_process_on_unload()
# https://github.com/python/mypy/issues/11839
return result # type: ignore[no-any-return]
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error unloading entry %s for %s", self.title, integration.domain
)
if integration.domain == self.domain:
self.state = ConfigEntryState.FAILED_UNLOAD
self.reason = "Unknown error"
return False
async def async_remove(self, hass: HomeAssistant) -> None:
"""Invoke remove callback on component."""
if self.source == SOURCE_IGNORE:
return
try:
integration = await loader.async_get_integration(hass, self.domain)
except loader.IntegrationNotFound:
# The integration was likely a custom_component
# that was uninstalled, or an integration
# that has been renamed without removing the config
# entry.
return
component = integration.get_component()
if not hasattr(component, "async_remove_entry"):
return
try:
await component.async_remove_entry(hass, self)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error calling entry remove callback %s for %s",
self.title,
integration.domain,
)
async def async_migrate(self, hass: HomeAssistant) -> bool:
"""Migrate an entry.
Returns True if config entry is up-to-date or has been migrated.
"""
if (handler := HANDLERS.get(self.domain)) is None:
_LOGGER.error(
"Flow handler not found for entry %s for %s", self.title, self.domain
)
return False
# Handler may be a partial
# Keep for backwards compatibility
# https://github.com/home-assistant/core/pull/67087#discussion_r812559950
while isinstance(handler, functools.partial):
handler = handler.func # type: ignore[unreachable]
if self.version == handler.VERSION:
return True
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
supports_migrate = hasattr(component, "async_migrate_entry")
if not supports_migrate:
_LOGGER.error(
"Migration handler not found for entry %s for %s",
self.title,
self.domain,
)
return False
try:
result = await component.async_migrate_entry(hass, self)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_migrate_entry did not return boolean", self.domain
)
return False
if result:
# pylint: disable=protected-access
hass.config_entries._async_schedule_save()
# https://github.com/python/mypy/issues/11839
return result # type: ignore[no-any-return]
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error migrating entry %s for %s", self.title, self.domain
)
return False
def add_update_listener(self, listener: UpdateListenerType) -> CALLBACK_TYPE:
"""Listen for when entry is updated.
Returns function to unlisten.
"""
weak_listener: Any
# weakref.ref is not applicable to a bound method, e.g. method of a class instance, as reference will die immediately
if hasattr(listener, "__self__"):
weak_listener = weakref.WeakMethod(cast(MethodType, listener))
else:
weak_listener = weakref.ref(listener)
self.update_listeners.append(weak_listener)
return lambda: self.update_listeners.remove(weak_listener)
def as_dict(self) -> dict[str, Any]:
"""Return dictionary version of this entry."""
return {
"entry_id": self.entry_id,
"version": self.version,
"domain": self.domain,
"title": self.title,
"data": dict(self.data),
"options": dict(self.options),
"pref_disable_new_entities": self.pref_disable_new_entities,
"pref_disable_polling": self.pref_disable_polling,
"source": self.source,
"unique_id": self.unique_id,
"disabled_by": self.disabled_by,
}
@callback
def async_on_unload(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when config entry is unloaded."""
if self._on_unload is None:
self._on_unload = []
self._on_unload.append(func)
@callback
def _async_process_on_unload(self) -> None:
"""Process the on_unload callbacks."""
if self._on_unload is not None:
while self._on_unload:
self._on_unload.pop()()
@callback
def async_start_reauth(self, hass: HomeAssistant) -> None:
"""Start a reauth flow."""
flow_context = {
"source": SOURCE_REAUTH,
"entry_id": self.entry_id,
"title_placeholders": {"name": self.title},
"unique_id": self.unique_id,
}
for flow in hass.config_entries.flow.async_progress_by_handler(self.domain):
if flow["context"] == flow_context:
return
hass.async_create_task(
hass.config_entries.flow.async_init(
self.domain,
context=flow_context,
data=self.data,
)
)
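# Hedged usage sketch (the helper name is illustrative, not part of this
# module): integrations typically register an update listener during setup and
# let async_on_unload remove it again, e.g.
#     entry.async_on_unload(entry.add_update_listener(_example_update_listener))
async def _example_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Reload the entry whenever its data or options are updated."""
    await hass.config_entries.async_reload(entry.entry_id)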
current_entry: ContextVar[ConfigEntry | None] = ContextVar(
"current_entry", default=None
)
class ConfigEntriesFlowManager(data_entry_flow.FlowManager):
"""Manage all the config entry flows that are in progress."""
def __init__(
self,
hass: HomeAssistant,
config_entries: ConfigEntries,
hass_config: ConfigType,
) -> None:
"""Initialize the config entry flow manager."""
super().__init__(hass)
self.config_entries = config_entries
self._hass_config = hass_config
@callback
def _async_has_other_discovery_flows(self, flow_id: str) -> bool:
"""Check if there are any other discovery flows in progress."""
return any(
flow.context["source"] in DISCOVERY_SOURCES and flow.flow_id != flow_id
for flow in self._progress.values()
)
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: data_entry_flow.FlowResult
) -> data_entry_flow.FlowResult:
"""Finish a config flow and add an entry."""
flow = cast(ConfigFlow, flow)
# Remove notification if no other discovery config entries in progress
if not self._async_has_other_discovery_flows(flow.flow_id):
persistent_notification.async_dismiss(self.hass, DISCOVERY_NOTIFICATION_ID)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
# Check if config entry exists with unique ID. Unload it.
existing_entry = None
# Abort all flows in progress with same unique ID
# or the default discovery ID
for progress_flow in self.async_progress_by_handler(flow.handler):
progress_unique_id = progress_flow["context"].get("unique_id")
if progress_flow["flow_id"] != flow.flow_id and (
(flow.unique_id and progress_unique_id == flow.unique_id)
or progress_unique_id == DEFAULT_DISCOVERY_UNIQUE_ID
):
self.async_abort(progress_flow["flow_id"])
if flow.unique_id is not None:
# Reset unique ID when the default discovery ID has been used
if flow.unique_id == DEFAULT_DISCOVERY_UNIQUE_ID:
await flow.async_set_unique_id(None)
# Find existing entry.
for check_entry in self.config_entries.async_entries(result["handler"]):
if check_entry.unique_id == flow.unique_id:
existing_entry = check_entry
break
# Unload the entry before setting up the new one.
# We will remove it only after the other one is set up,
# so that device customizations are not getting lost.
if existing_entry is not None and existing_entry.state.recoverable:
await self.config_entries.async_unload(existing_entry.entry_id)
entry = ConfigEntry(
version=result["version"],
domain=result["handler"],
title=result["title"],
data=result["data"],
options=result["options"],
source=flow.context["source"],
unique_id=flow.unique_id,
)
await self.config_entries.async_add(entry)
if existing_entry is not None:
await self.config_entries.async_remove(existing_entry.entry_id)
result["result"] = entry
return result
async def async_create_flow(
self, handler_key: Any, *, context: dict | None = None, data: Any = None
) -> ConfigFlow:
"""Create a flow for specified handler.
Handler key is the domain of the component that we want to set up.
"""
try:
integration = await loader.async_get_integration(self.hass, handler_key)
except loader.IntegrationNotFound as err:
_LOGGER.error("Cannot find integration %s", handler_key)
raise data_entry_flow.UnknownHandler from err
# Make sure requirements and dependencies of component are resolved
await async_process_deps_reqs(self.hass, self._hass_config, integration)
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error occurred loading configuration flow for integration %s: %s",
handler_key,
err,
)
raise data_entry_flow.UnknownHandler
if (handler := HANDLERS.get(handler_key)) is None:
raise data_entry_flow.UnknownHandler
if not context or "source" not in context:
raise KeyError("Context not set or doesn't have a source set")
flow = handler()
flow.init_step = context["source"]
return flow
async def async_post_init(
self, flow: data_entry_flow.FlowHandler, result: data_entry_flow.FlowResult
) -> None:
"""After a flow is initialised trigger new flow notifications."""
source = flow.context["source"]
# Create notification.
if source in DISCOVERY_SOURCES:
self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
persistent_notification.async_create(
self.hass,
title="New devices discovered",
message=(
"We have discovered new devices on your network. "
"[Check it out](/config/integrations)."
),
notification_id=DISCOVERY_NOTIFICATION_ID,
)
elif source == SOURCE_REAUTH:
persistent_notification.async_create(
self.hass,
title="Integration requires reconfiguration",
message=(
"At least one of your integrations requires reconfiguration to "
"continue functioning. [Check it out](/config/integrations)."
),
notification_id=RECONFIGURE_NOTIFICATION_ID,
)
class ConfigEntries:
"""Manage the configuration entries.
An instance of this object is available via `hass.config_entries`.
"""
def __init__(self, hass: HomeAssistant, hass_config: ConfigType) -> None:
"""Initialize the entry manager."""
self.hass = hass
self.flow = ConfigEntriesFlowManager(hass, self, hass_config)
self.options = OptionsFlowManager(hass)
self._hass_config = hass_config
self._entries: dict[str, ConfigEntry] = {}
self._domain_index: dict[str, list[str]] = {}
self._store = storage.Store(hass, STORAGE_VERSION, STORAGE_KEY)
EntityRegistryDisabledHandler(hass).async_setup()
@callback
def async_domains(
self, include_ignore: bool = False, include_disabled: bool = False
) -> list[str]:
"""Return domains for which we have entries."""
return list(
{
entry.domain: None
for entry in self._entries.values()
if (include_ignore or entry.source != SOURCE_IGNORE)
and (include_disabled or not entry.disabled_by)
}
)
@callback
def async_get_entry(self, entry_id: str) -> ConfigEntry | None:
"""Return entry with matching entry_id."""
return self._entries.get(entry_id)
@callback
def async_entries(self, domain: str | None = None) -> list[ConfigEntry]:
"""Return all entries or entries for a specific domain."""
if domain is None:
return list(self._entries.values())
return [
self._entries[entry_id] for entry_id in self._domain_index.get(domain, [])
]
async def async_add(self, entry: ConfigEntry) -> None:
"""Add and setup an entry."""
if entry.entry_id in self._entries:
raise HomeAssistantError(
f"An entry with the id {entry.entry_id} already exists."
)
self._entries[entry.entry_id] = entry
self._domain_index.setdefault(entry.domain, []).append(entry.entry_id)
await self.async_setup(entry.entry_id)
self._async_schedule_save()
async def async_remove(self, entry_id: str) -> dict[str, Any]:
"""Remove an entry."""
if (entry := self.async_get_entry(entry_id)) is None:
raise UnknownEntry
if not entry.state.recoverable:
unload_success = entry.state is not ConfigEntryState.FAILED_UNLOAD
else:
unload_success = await self.async_unload(entry_id)
await entry.async_remove(self.hass)
del self._entries[entry.entry_id]
self._domain_index[entry.domain].remove(entry.entry_id)
if not self._domain_index[entry.domain]:
del self._domain_index[entry.domain]
self._async_schedule_save()
dev_reg = device_registry.async_get(self.hass)
ent_reg = entity_registry.async_get(self.hass)
dev_reg.async_clear_config_entry(entry_id)
ent_reg.async_clear_config_entry(entry_id)
# If the configuration entry is removed during reauth, it should
# abort any reauth flow that is active for the removed entry.
for progress_flow in self.hass.config_entries.flow.async_progress_by_handler(
entry.domain
):
context = progress_flow.get("context")
if (
context
and context["source"] == SOURCE_REAUTH
and "entry_id" in context
and context["entry_id"] == entry_id
and "flow_id" in progress_flow
):
self.hass.config_entries.flow.async_abort(progress_flow["flow_id"])
# After we have fully removed an "ignore" config entry we can try and rediscover it so that a
# user is able to immediately start configuring it. We do this by starting a new flow with
# the 'unignore' step. If the integration doesn't implement async_step_unignore then
# this will be a no-op.
if entry.source == SOURCE_IGNORE:
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
entry.domain,
context={"source": SOURCE_UNIGNORE},
data={"unique_id": entry.unique_id},
)
)
return {"require_restart": not unload_success}
async def _async_shutdown(self, event: Event) -> None:
"""Call when Home Assistant is stopping."""
await asyncio.gather(
*(entry.async_shutdown() for entry in self._entries.values())
)
await self.flow.async_shutdown()
async def async_initialize(self) -> None:
"""Initialize config entry config."""
# Migrating for config entries stored before 0.73
config = await storage.async_migrator(
self.hass,
self.hass.config.path(PATH_CONFIG),
self._store,
old_conf_migrate_func=_old_conf_migrator,
)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_shutdown)
if config is None:
self._entries = {}
self._domain_index = {}
return
entries = {}
domain_index: dict[str, list[str]] = {}
for entry in config["entries"]:
pref_disable_new_entities = entry.get("pref_disable_new_entities")
# Between 0.98 and 2021.6 we stored 'disable_new_entities' in a system options dictionary
if pref_disable_new_entities is None and "system_options" in entry:
pref_disable_new_entities = entry.get("system_options", {}).get(
"disable_new_entities"
)
domain = entry["domain"]
entry_id = entry["entry_id"]
entries[entry_id] = ConfigEntry(
version=entry["version"],
domain=domain,
entry_id=entry_id,
data=entry["data"],
source=entry["source"],
title=entry["title"],
# New in 0.89
options=entry.get("options"),
# New in 0.104
unique_id=entry.get("unique_id"),
# New in 2021.3
disabled_by=ConfigEntryDisabler(entry["disabled_by"])
if entry.get("disabled_by")
else None,
# New in 2021.6
pref_disable_new_entities=pref_disable_new_entities,
pref_disable_polling=entry.get("pref_disable_polling"),
)
domain_index.setdefault(domain, []).append(entry_id)
self._domain_index = domain_index
self._entries = entries
async def async_setup(self, entry_id: str) -> bool:
"""Set up a config entry.
Return True if entry has been successfully loaded.
"""
if (entry := self.async_get_entry(entry_id)) is None:
raise UnknownEntry
if entry.state is not ConfigEntryState.NOT_LOADED:
raise OperationNotAllowed
# Setup Component if not set up yet
if entry.domain in self.hass.config.components:
await entry.async_setup(self.hass)
else:
# Setting up the component will set up all its config entries
result = await async_setup_component(
self.hass, entry.domain, self._hass_config
)
if not result:
return result
return entry.state is ConfigEntryState.LOADED # type: ignore[comparison-overlap] # mypy bug?
async def async_unload(self, entry_id: str) -> bool:
"""Unload a config entry."""
if (entry := self.async_get_entry(entry_id)) is None:
raise UnknownEntry
if not entry.state.recoverable:
raise OperationNotAllowed
return await entry.async_unload(self.hass)
async def async_reload(self, entry_id: str) -> bool:
"""Reload an entry.
If an entry was not loaded, will just load.
"""
if (entry := self.async_get_entry(entry_id)) is None:
raise UnknownEntry
unload_result = await self.async_unload(entry_id)
if not unload_result or entry.disabled_by:
return unload_result
return await self.async_setup(entry_id)
async def async_set_disabled_by(
self, entry_id: str, disabled_by: ConfigEntryDisabler | None
) -> bool:
"""Disable an entry.
If disabled_by is changed, the config entry will be reloaded.
"""
if (entry := self.async_get_entry(entry_id)) is None:
raise UnknownEntry
if isinstance(disabled_by, str) and not isinstance(
disabled_by, ConfigEntryDisabler
):
report( # type: ignore[unreachable]
"uses str for config entry disabled_by. This is deprecated and will "
"stop working in Home Assistant 2022.3, it should be updated to use "
"ConfigEntryDisabler instead",
error_if_core=False,
)
disabled_by = ConfigEntryDisabler(disabled_by)
if entry.disabled_by is disabled_by:
return True
entry.disabled_by = disabled_by
self._async_schedule_save()
dev_reg = device_registry.async_get(self.hass)
ent_reg = entity_registry.async_get(self.hass)
if not entry.disabled_by:
# The config entry will no longer be disabled, enable devices and entities
device_registry.async_config_entry_disabled_by_changed(dev_reg, entry)
entity_registry.async_config_entry_disabled_by_changed(ent_reg, entry)
# Load or unload the config entry
reload_result = await self.async_reload(entry_id)
if entry.disabled_by:
# The config entry has been disabled, disable devices and entities
device_registry.async_config_entry_disabled_by_changed(dev_reg, entry)
entity_registry.async_config_entry_disabled_by_changed(ent_reg, entry)
return reload_result
@callback
def async_update_entry(
self,
entry: ConfigEntry,
*,
unique_id: str | None | UndefinedType = UNDEFINED,
title: str | UndefinedType = UNDEFINED,
data: Mapping[str, Any] | UndefinedType = UNDEFINED,
options: Mapping[str, Any] | UndefinedType = UNDEFINED,
pref_disable_new_entities: bool | UndefinedType = UNDEFINED,
pref_disable_polling: bool | UndefinedType = UNDEFINED,
) -> bool:
"""Update a config entry.
        If the entry was changed, the update_listeners are
        fired and this function returns True.
        If the entry was not changed, the update_listeners are
        not fired and this function returns False.
"""
changed = False
for attr, value in (
("unique_id", unique_id),
("title", title),
("pref_disable_new_entities", pref_disable_new_entities),
("pref_disable_polling", pref_disable_polling),
):
if value == UNDEFINED or getattr(entry, attr) == value:
continue
setattr(entry, attr, value)
changed = True
if data is not UNDEFINED and entry.data != data:
changed = True
entry.data = MappingProxyType(data)
if options is not UNDEFINED and entry.options != options:
changed = True
entry.options = MappingProxyType(options)
if not changed:
return False
for listener_ref in entry.update_listeners:
if (listener := listener_ref()) is not None:
self.hass.async_create_task(listener(self.hass, entry))
self._async_schedule_save()
return True
@callback
def async_setup_platforms(
self, entry: ConfigEntry, platforms: Iterable[Platform | str]
) -> None:
"""Forward the setup of an entry to platforms."""
for platform in platforms:
self.hass.async_create_task(self.async_forward_entry_setup(entry, platform))
async def async_forward_entry_setup(
self, entry: ConfigEntry, domain: Platform | str
) -> bool:
"""Forward the setup of an entry to a different component.
        By default an entry is set up with the component it belongs to. If that
        component also has related platforms, the component will have to
        forward the entry to be set up by that component.
You don't want to await this coroutine if it is called as part of the
setup of a component, because it can cause a deadlock.
"""
# Setup Component if not set up yet
if domain not in self.hass.config.components:
result = await async_setup_component(self.hass, domain, self._hass_config)
if not result:
return False
integration = await loader.async_get_integration(self.hass, domain)
await entry.async_setup(self.hass, integration=integration)
return True
async def async_unload_platforms(
self, entry: ConfigEntry, platforms: Iterable[Platform | str]
) -> bool:
"""Forward the unloading of an entry to platforms."""
return all(
await asyncio.gather(
*(
self.async_forward_entry_unload(entry, platform)
for platform in platforms
)
)
)
async def async_forward_entry_unload(
self, entry: ConfigEntry, domain: Platform | str
) -> bool:
"""Forward the unloading of an entry to a different component."""
# It was never loaded.
if domain not in self.hass.config.components:
return True
integration = await loader.async_get_integration(self.hass, domain)
return await entry.async_unload(self.hass, integration=integration)
@callback
def _async_schedule_save(self) -> None:
"""Save the entity registry to a file."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict[str, list[dict[str, Any]]]:
"""Return data to save."""
return {"entries": [entry.as_dict() for entry in self._entries.values()]}
async def _old_conf_migrator(old_config: dict[str, Any]) -> dict[str, Any]:
"""Migrate the pre-0.73 config format to the latest version."""
return {"entries": old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
"""Base class for config flows with some helpers."""
def __init_subclass__(cls, *, domain: str | None = None, **kwargs: Any) -> None:
"""Initialize a subclass, register if possible."""
super().__init_subclass__(**kwargs)
if domain is not None:
HANDLERS.register(domain)(cls)
@property
def unique_id(self) -> str | None:
"""Return unique ID if available."""
if not self.context:
return None
return cast(Optional[str], self.context.get("unique_id"))
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Get the options flow for this handler."""
raise data_entry_flow.UnknownHandler
@classmethod
@callback
def async_supports_options_flow(cls, config_entry: ConfigEntry) -> bool:
"""Return options flow support for this handler."""
return cls.async_get_options_flow is not ConfigFlow.async_get_options_flow
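    # Hedged example (not part of this class): an integration enables an options
    # flow by overriding the static method above, e.g.
    #
    #     @staticmethod
    #     @callback
    #     def async_get_options_flow(config_entry):
    #         return MyOptionsFlowHandler(config_entry)  # hypothetical handler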
@callback
def _async_abort_entries_match(
self, match_dict: dict[str, Any] | None = None
) -> None:
"""Abort if current entries match all data."""
if match_dict is None:
match_dict = {} # Match any entry
for entry in self._async_current_entries(include_ignore=False):
if all(
item in ChainMap(entry.options, entry.data).items() # type: ignore[arg-type]
for item in match_dict.items()
):
raise data_entry_flow.AbortFlow("already_configured")
@callback
def _abort_if_unique_id_configured(
self,
updates: dict[str, Any] | None = None,
reload_on_update: bool = True,
) -> None:
"""Abort if the unique ID is already configured."""
if self.unique_id is None:
return
for entry in self._async_current_entries(include_ignore=True):
if entry.unique_id == self.unique_id:
if updates is not None:
changed = self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
if (
changed
and reload_on_update
and entry.state
in (ConfigEntryState.LOADED, ConfigEntryState.SETUP_RETRY)
):
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
# Allow ignored entries to be configured on manual user step
if entry.source == SOURCE_IGNORE and self.source == SOURCE_USER:
continue
raise data_entry_flow.AbortFlow("already_configured")
async def async_set_unique_id(
self, unique_id: str | None = None, *, raise_on_progress: bool = True
) -> ConfigEntry | None:
"""Set a unique ID for the config flow.
        Returns the existing config entry with the same ID, if one exists.
"""
if unique_id is None:
self.context["unique_id"] = None
return None
if raise_on_progress:
for progress in self._async_in_progress(include_uninitialized=True):
if progress["context"].get("unique_id") == unique_id:
raise data_entry_flow.AbortFlow("already_in_progress")
self.context["unique_id"] = unique_id
# Abort discoveries done using the default discovery unique id
if unique_id != DEFAULT_DISCOVERY_UNIQUE_ID:
for progress in self._async_in_progress(include_uninitialized=True):
if progress["context"].get("unique_id") == DEFAULT_DISCOVERY_UNIQUE_ID:
self.hass.config_entries.flow.async_abort(progress["flow_id"])
for entry in self._async_current_entries(include_ignore=True):
if entry.unique_id == unique_id:
return entry
return None
@callback
def _set_confirm_only(
self,
) -> None:
"""Mark the config flow as only needing user confirmation to finish flow."""
self.context["confirm_only"] = True
@callback
def _async_current_entries(
self, include_ignore: bool | None = None
) -> list[ConfigEntry]:
"""Return current entries.
If the flow is user initiated, filter out ignored entries unless include_ignore is True.
"""
config_entries = self.hass.config_entries.async_entries(self.handler)
if (
include_ignore is True
or include_ignore is None
and self.source != SOURCE_USER
):
return config_entries
return [entry for entry in config_entries if entry.source != SOURCE_IGNORE]
@callback
def _async_current_ids(self, include_ignore: bool = True) -> set[str | None]:
"""Return current unique IDs."""
return {
entry.unique_id
for entry in self.hass.config_entries.async_entries(self.handler)
if include_ignore or entry.source != SOURCE_IGNORE
}
@callback
def _async_in_progress(
self, include_uninitialized: bool = False
) -> list[data_entry_flow.FlowResult]:
"""Return other in progress flows for current domain."""
return [
flw
for flw in self.hass.config_entries.flow.async_progress_by_handler(
self.handler, include_uninitialized=include_uninitialized
)
if flw["flow_id"] != self.flow_id
]
async def async_step_ignore(
self, user_input: dict[str, Any]
) -> data_entry_flow.FlowResult:
"""Ignore this config flow."""
await self.async_set_unique_id(user_input["unique_id"], raise_on_progress=False)
return self.async_create_entry(title=user_input["title"], data={})
async def async_step_unignore(
self, user_input: dict[str, Any]
) -> data_entry_flow.FlowResult:
"""Rediscover a config entry by it's unique_id."""
return self.async_abort(reason="not_implemented")
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> data_entry_flow.FlowResult:
"""Handle a flow initiated by the user."""
return self.async_abort(reason="not_implemented")
async def _async_handle_discovery_without_unique_id(self) -> None:
"""Mark this flow discovered, without a unique identifier.
        If a flow initiated by discovery doesn't have a unique ID, this can
        be used as an alternative. It ensures that only one flow is started,
        and only when the handler has no existing config entries.
        It also ensures that the discovery can be ignored by the user.
"""
if self.unique_id is not None:
return
# Abort if the handler has config entries already
if self._async_current_entries():
raise data_entry_flow.AbortFlow("already_configured")
        # Use a special unique ID to differentiate
await self.async_set_unique_id(DEFAULT_DISCOVERY_UNIQUE_ID)
self._abort_if_unique_id_configured()
# Abort if any other flow for this handler is already in progress
if self._async_in_progress(include_uninitialized=True):
raise data_entry_flow.AbortFlow("already_in_progress")
async def async_step_discovery(
self, discovery_info: DiscoveryInfoType
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by discovery."""
await self._async_handle_discovery_without_unique_id()
return await self.async_step_user()
@callback
def async_abort(
self, *, reason: str, description_placeholders: dict | None = None
) -> data_entry_flow.FlowResult:
"""Abort the config flow."""
# Remove reauth notification if no reauth flows are in progress
if self.source == SOURCE_REAUTH and not any(
ent["context"]["source"] == SOURCE_REAUTH
for ent in self.hass.config_entries.flow.async_progress_by_handler(
self.handler
)
if ent["flow_id"] != self.flow_id
):
persistent_notification.async_dismiss(
self.hass, RECONFIGURE_NOTIFICATION_ID
)
return super().async_abort(
reason=reason, description_placeholders=description_placeholders
)
async def async_step_dhcp(
self, discovery_info: DhcpServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by DHCP discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
async def async_step_hassio(
self, discovery_info: HassioServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by HASS IO discovery."""
return await self.async_step_discovery(discovery_info.config)
async def async_step_integration_discovery(
self, discovery_info: DiscoveryInfoType
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by integration specific discovery."""
return await self.async_step_discovery(discovery_info)
async def async_step_homekit(
self, discovery_info: ZeroconfServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by Homekit discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
async def async_step_mqtt(
self, discovery_info: MqttServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by MQTT discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
async def async_step_ssdp(
self, discovery_info: SsdpServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by SSDP discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
async def async_step_usb(
self, discovery_info: UsbServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by USB discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
async def async_step_zeroconf(
self, discovery_info: ZeroconfServiceInfo
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by Zeroconf discovery."""
return await self.async_step_discovery(dataclasses.asdict(discovery_info))
@callback
def async_create_entry(
self,
*,
title: str,
data: Mapping[str, Any],
description: str | None = None,
description_placeholders: dict | None = None,
options: Mapping[str, Any] | None = None,
) -> data_entry_flow.FlowResult:
"""Finish config flow and create a config entry."""
result = super().async_create_entry(
title=title,
data=data,
description=description,
description_placeholders=description_placeholders,
)
result["options"] = options or {}
return result
class OptionsFlowManager(data_entry_flow.FlowManager):
"""Flow to set options for a configuration entry."""
async def async_create_flow(
self,
handler_key: Any,
*,
context: dict[str, Any] | None = None,
data: dict[str, Any] | None = None,
) -> OptionsFlow:
"""Create an options flow for a config entry.
        The entry_id and flow.handler are the same value, which is used to map the entry to its flow.
"""
entry = self.hass.config_entries.async_get_entry(handler_key)
if entry is None:
raise UnknownEntry(handler_key)
if entry.domain not in HANDLERS:
raise data_entry_flow.UnknownHandler
return HANDLERS[entry.domain].async_get_options_flow(entry)
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: data_entry_flow.FlowResult
) -> data_entry_flow.FlowResult:
"""Finish an options flow and update options for configuration entry.
        The flow.handler and entry_id are the same value, which is used to map the flow to its config entry.
"""
flow = cast(OptionsFlow, flow)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
entry = self.hass.config_entries.async_get_entry(flow.handler)
if entry is None:
raise UnknownEntry(flow.handler)
if result["data"] is not None:
self.hass.config_entries.async_update_entry(entry, options=result["data"])
result["result"] = True
return result
class OptionsFlow(data_entry_flow.FlowHandler):
"""Base class for config option flows."""
handler: str
class EntityRegistryDisabledHandler:
"""Handler to handle when entities related to config entries updating disabled_by."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the handler."""
self.hass = hass
self.registry: entity_registry.EntityRegistry | None = None
self.changed: set[str] = set()
self._remove_call_later: Callable[[], None] | None = None
@callback
def async_setup(self) -> None:
"""Set up the disable handler."""
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entry_updated,
event_filter=_handle_entry_updated_filter,
)
async def _handle_entry_updated(self, event: Event) -> None:
"""Handle entity registry entry update."""
if self.registry is None:
self.registry = entity_registry.async_get(self.hass)
entity_entry = self.registry.async_get(event.data["entity_id"])
if (
# Stop if no entry found
entity_entry is None
# Stop if entry not connected to config entry
or entity_entry.config_entry_id is None
# Stop if the entry got disabled. In that case the entity handles it
# themselves.
or entity_entry.disabled_by
):
return
config_entry = self.hass.config_entries.async_get_entry(
entity_entry.config_entry_id
)
assert config_entry is not None
if config_entry.entry_id not in self.changed and config_entry.supports_unload:
self.changed.add(config_entry.entry_id)
if not self.changed:
return
# We are going to delay reloading on *every* entity registry change so that
# if a user is happily clicking along, it will only reload at the end.
if self._remove_call_later:
self._remove_call_later()
self._remove_call_later = async_call_later(
self.hass, RELOAD_AFTER_UPDATE_DELAY, self._handle_reload
)
async def _handle_reload(self, _now: Any) -> None:
"""Handle a reload."""
self._remove_call_later = None
to_reload = self.changed
self.changed = set()
_LOGGER.info(
"Reloading configuration entries because disabled_by changed in entity registry: %s",
", ".join(self.changed),
)
await asyncio.gather(
*(self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload)
)
@callback
def _handle_entry_updated_filter(event: Event) -> bool:
"""Handle entity registry entry update filter.
Only handle changes to "disabled_by".
If "disabled_by" was CONFIG_ENTRY, reload is not needed.
"""
if (
event.data["action"] != "update"
or "disabled_by" not in event.data["changes"]
or event.data["changes"]["disabled_by"]
is entity_registry.RegistryEntryDisabler.CONFIG_ENTRY
):
return False
return True
async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool:
"""Test if a domain supports entry unloading."""
integration = await loader.async_get_integration(hass, domain)
component = integration.get_component()
return hasattr(component, "async_unload_entry")
async def support_remove_from_device(hass: HomeAssistant, domain: str) -> bool:
"""Test if a domain supports being removed from a device."""
integration = await loader.async_get_integration(hass, domain)
component = integration.get_component()
return hasattr(component, "async_remove_config_entry_device")
|
py | 7dfbb301b0113c39fdb8aa79eeed2a362b98500c | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:27:09 2017
@author: MichaelEK
"""
import geopandas as gpd
import pandas as pd
import os
from util import grp_ts_agg, tsreg, getPolyCoords
import shutil
from gistools.vector import multipoly_to_poly, xy_to_gpd
from datetime import date
from scipy.stats import rankdata
from numpy import nan
from warnings import filterwarnings
from pdsql import mssql
from pyhydrotel import get_ts_data
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, HoverTool, CategoricalColorMapper, CustomJS, renderers, annotations
from bokeh.palettes import brewer
from bokeh.models.widgets import Select
from bokeh.layouts import column
from bokeh.io import save
import parameters as param
pd.options.display.max_columns = 10
##################################################
#### Read in data
print('Reading in the data')
### gw
#gw_sites = read_file(join(base_dir, gw_sites_shp))
gw_zones = gpd.read_file(os.path.join(param.base_dir, param.input_dir, param.gw_poly_shp))[['ZONE_NAME', 'geometry']]
gw_zones = gw_zones.rename(columns={'ZONE_NAME': 'zone'})
#gw_zones['mtype'] = 'gw'
# well_depths = pd.read_csv(os.path.join(param.base_dir, param.input_dir, param.well_depth_csv)).set_index('site')
well_depths = mssql.rd_sql(param.wells_server, param.wells_database, param.well_depth_table, ['well_no', 'depth']).drop_duplicates('well_no')
well_depths = well_depths[well_depths['depth'].notnull()]
well_depths.rename(columns={'depth': 'well_depth'}, inplace=True)
well_screens = mssql.rd_sql(param.wells_server, param.wells_database, param.well_screen_table, ['well_no', 'top_screen'], where_in={'screen_no': [1]}).drop_duplicates('well_no')
##################################################
#### Process well depth categories
well_info = pd.merge(well_depths, well_screens, on='well_no', how='left')
well_info['depth'] = 'Shallow'
well_info.loc[well_info['top_screen'] >= 30, 'depth'] = 'Deep'
well_info.loc[well_info['top_screen'].isnull() & (well_info['well_depth'] >= 30), 'depth'] = 'Deep'
well_depths = well_info[['well_no', 'depth']].rename(columns={'well_no': 'site'}).set_index('site')
#################################################
#### Select sites
### GW
sites1 = mssql.rd_sql(param.usm_server, param.usm_database, param.site_table, ['ID', 'UpstreamSiteID', 'NZTMX', 'NZTMY'])
sites_attr1 = mssql.rd_sql(param.usm_server, param.usm_database, param.site_attr_table, ['SiteID', 'CwmsName'])
sites_attr1.rename(columns={'SiteID': 'ID'}, inplace=True)
sites = pd.merge(sites1, sites_attr1, on='ID').drop('ID', axis=1)
sites.rename(columns={'UpstreamSiteID': 'site'}, inplace=True)
sites = sites[sites.site.isin(well_depths.index)]
## Manual data
mgw1 = mssql.rd_sql(param.wells_server, param.wells_database, 'DTW_READINGS', ['well_no', 'date_read', 'depth_to_water'], where_in={'TIDEDA_FLAG': ['N']}, rename_cols=['site', 'time', 'data'])
mgw1['time'] = pd.to_datetime(mgw1['time'])
mgw1 = mgw1.groupby(['site', pd.Grouper(key='time', freq='D')]).mean().reset_index()
mgw1 = mgw1[mgw1.site.isin(sites.site)]
## Recorder data
# hy1 = get_ts_data(param.hydrotel_server, param.hydrotel_database, ['water level', 'adjusted water level'], sites.site.tolist(), resample_code='D').reset_index()
# rgw1 = hy1.sort_values('MType').drop_duplicates(['ExtSiteID', 'DateTime']).drop('MType', axis=1)
# rgw1.rename(columns={'ExtSiteID': 'site', 'DateTime': 'time', 'Value': 'data'}, inplace=True)
# rgw1 = mssql.rd_sql_ts(param.hydro_server, param.hydro_database, param.ts_table, 'ExtSiteID', 'DateTime', 'Value', where_in={'DatasetTypeID': [10]}).reset_index()
# rgw1.rename(columns={'ExtSiteID': 'site', 'DateTime': 'time', 'Value': 'data'}, inplace=True)
#
# rgw1 = rgw1[rgw1.site.isin(sites.site)]
## Prioritise recorder data
# mgw1 = mgw1[~mgw1.site.isin(rgw1.site.unique())].copy()
## Combine
# gw1 = pd.concat([rgw1, mgw1]).drop_duplicates(['site', 'time'])
gw1 = mgw1.copy()
#################################################
#### Run monthly summary stats
print('Processing past data')
### Filter sites
count0 = gw1.copy()
count0['month'] = gw1.time.dt.month
count0['year'] = gw1.time.dt.year
count1 = count0.drop_duplicates(['site', 'year', 'month']).groupby('site').data.count()
start_date0 = gw1.groupby('site').time.first()
end_date1 = gw1.groupby('site').time.last()
now1 = pd.to_datetime(param.date_now) + pd.DateOffset(days=param.add_days)
start_date1 = now1 - pd.DateOffset(months=121) - pd.DateOffset(days=now1.day - 1)
start_date2 = now1 - pd.DateOffset(months=1) - pd.DateOffset(days=now1.day - 1)
sites1 = sites[sites.site.isin(count1[(count1 >= 120) & (end_date1 >= start_date2) & (start_date0 <= start_date1)].index)]
uw1 = sites[sites.CwmsName.isin(['Upper Waitaki']) & sites.site.isin(count1[(count1 >= 80) & (end_date1 >= start_date2) & (start_date0 <= start_date1)].index)]
sites2 = pd.concat([sites1, uw1]).drop_duplicates()
gw_sites = xy_to_gpd(['site', 'CwmsName'], 'NZTMX', 'NZTMY', sites2)
gw2 = gw1[gw1.site.isin(gw_sites.site)].copy()
### Extract Site locations
gw_sites.to_file(os.path.join(param.base_dir, param.output_dir, param.gw_sites_shp))
### Combine the sites with the polygons
gw_site_zone = gw_sites.drop(['geometry'], axis=1)
gw_site_zone.rename(columns={'CwmsName': 'zone'}, inplace=True)
### Monthly interpolations
if param.interp:
## Estimate monthly means through interpolation
day1 = grp_ts_agg(gw2, 'site', 'time', 'D').mean().unstack('site')
day2 = tsreg(day1, 'D', False)
day3 = day2.interpolate(method='time', limit=40)
mon_gw1 = day3.resample('M').median().stack().reset_index()
else:
mon_gw1 = grp_ts_agg(gw2, 'site', 'time', 'M').median().reset_index()
## End the dataset at the latest month
end_date = now1 - pd.DateOffset(days=now1.day - 1)
mon_gw1 = mon_gw1[mon_gw1.time < end_date].copy()
## Assign month
mon_gw1['mon'] = mon_gw1.time.dt.month
##############################################
#### Run the monthly stats comparisons
print('Calculating the percentiles')
hy_gw0 = mon_gw1.copy()
hy_gw0['perc'] = (hy_gw0.groupby(['site', 'mon'])['data'].transform(lambda x: (rankdata(x)-1)/(len(x)-1)) * 100).round(2)
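# Note (added explanation): the transform above is a percentile rank within each
# (site, month) group: (rankdata(x) - 1) / (len(x) - 1) maps the smallest value to
# 0 and the largest to 1, so e.g. x = [5, 7, 9] becomes 0, 50 and 100 percent.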
###############################################
#### Pull out recent monthly data
start_date = now1 - pd.DateOffset(months=param.n_previous_months) - pd.DateOffset(days=now1.day - 1)
print('start date: ' + str(start_date), 'end date: ' + str(end_date))
### selection
hy_gw = hy_gw0[(hy_gw0.time >= start_date)].copy()
### Convert datetime to year-month str
hy_gw['time'] = hy_gw.time.dt.strftime('%Y-%m')
##############################################
#### Calc zone stats and apply categories
perc_site_zone = pd.merge(hy_gw, gw_site_zone, on='site')
perc_zone = perc_site_zone.groupby(['zone', 'time'])['perc'].mean()
prod1 = [gw_zones.zone.unique(), perc_zone.reset_index().time.unique()]
mindex = pd.MultiIndex.from_product(prod1, names=['zone', 'time'])
blank1 = pd.Series(nan, index=mindex, name='temp')
zone_stats2 = pd.concat([perc_zone, blank1], axis=1).perc
zone_stats2[zone_stats2.isnull()] = -1
cat_val_lst = [-10, -0.5, 10, 25, 75, 90, 100]
cat_name_lst = ['No data', 'Very low', 'Below average', 'Average', 'Above average', 'Very high']
cat1 = pd.cut(zone_stats2, cat_val_lst, labels=cat_name_lst).astype('str')
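# Note (added explanation): zones with no data were filled with -1 above, so they
# fall into the (-10, -0.5] bin and get the 'No data' label; the remaining bins map
# mean zone percentiles onto the five descriptive categories.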
cat1.name = 'category'
cat2 = pd.concat([zone_stats2, cat1], axis=1)
cat3 = cat2.sort_values('perc', ascending=False).category
################################################
#### Output stats
print('Exporting results to csv')
ts_out1 = hy_gw.loc[:, ['site', 'time', 'perc']].copy()
ts_out2 = ts_out1.pivot_table('perc', 'site', 'time').round(2)
stats1 = mon_gw1.groupby('site')['data'].describe().round(2)
ts_out3 = pd.concat([ts_out2, stats1], axis=1, join='inner')
well_depths1 = well_depths.loc[ts_out3.index]
ts_out4 = pd.concat([ts_out3, well_depths1], axis=1).reset_index()
gw_sites_ts = gw_sites.merge(ts_out4, on='site')
gw_sites_ts.crs = gw_sites.crs
gw_sites_ts.to_file(os.path.join(param.base_dir, param.output_dir, param.gw_sites_ts_shp))
ts_out10 = hy_gw0.loc[:, ['site', 'time', 'perc']].copy()
ts_out10['time'] = ts_out10['time'].dt.date.astype(str)
ts_out10['perc'] = ts_out10['perc'].round(2)
ts_out10.to_csv(os.path.join(param.base_dir, param.output_dir, param.gw_sites_ts_csv), header=True, index=False)
#################################################
#### Plotting
print('Creating the plot')
### Extract x and y data for plotting
zones1 = multipoly_to_poly(gw_zones)
zones1['x'] = zones1.apply(getPolyCoords, coord_type='x', axis=1)
zones1['y'] = zones1.apply(getPolyCoords, coord_type='y', axis=1)
zones2 = zones1.drop('geometry', axis=1)
### Combine with time series data
data1 = pd.merge(cat1.unstack('time').reset_index(), zones2, on=['zone'])
time_index = hy_gw.time.unique().tolist()
data1['cat'] = data1[time_index[-1]]
### Extract the mtype dataframes
gw_b = data1.copy()
gw_source = ColumnDataSource(gw_b)
time_source = ColumnDataSource(pd.DataFrame({'index': time_index}))
### Set up plotting parameters
c1 = brewer['RdBu'][5]
grey1 = brewer['Greys'][7][5]
factors = cat_name_lst[::-1]
color_map = CategoricalColorMapper(factors=factors, palette=[c1[0], c1[1], c1[2], c1[3], c1[4], grey1])
### Set up dummy source for the legend
dummy_b = gw_b[['zone', 'cat', 'x', 'y']].sort_values('zone')
dummy_b.loc[:, 'cat'].iloc[0:len(factors)] = factors
dummy_source = ColumnDataSource(dummy_b)
TOOLS = "pan,wheel_zoom,reset,hover,save"
w = 700
h = w
bokeh_gw_cwms_html = os.path.join(param.base_dir, param.bokeh_dir, param.today_gw_cwms_html)
output_file(bokeh_gw_cwms_html)
## dummy figure - for legend consistency
p0 = figure(title='dummy Index', tools=[], height=h, width=w)
p0.patches('x', 'y', source=dummy_source, fill_color={'field': 'cat', 'transform': color_map}, line_color="black", line_width=1, legend='cat')
p0.renderers = [i for i in p0.renderers if (type(i) == renderers.GlyphRenderer) | (type(i) == annotations.Legend)]
p0.renderers[1].visible = False
## Figure 3 - GW
p3 = figure(title='Groundwater Level Index', tools=TOOLS, active_scroll='wheel_zoom', plot_height=h, plot_width=w)
p3.patches('x', 'y', source=gw_source, fill_color={'field': 'cat', 'transform': color_map}, line_color="black", line_width=1, legend='cat')
p3.renderers.extend(p0.renderers)
p3.legend.location = 'top_left'
hover3 = p3.select_one(HoverTool)
hover3.point_policy = "follow_mouse"
hover3.tooltips = [("Category", "@cat"), ("Zone", "@zone")]
callback3 = CustomJS(args=dict(source=gw_source), code="""
var data = source.data;
var f = cb_obj.value;
source.data.cat = data[f];
source.change.emit();
""")
select3 = Select(title='Month', value=time_index[-1], options=time_index)
select3.js_on_change('value', callback3)
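# Note (added explanation): the Select widget drives the map entirely client-side;
# the CustomJS callback copies the chosen month's category column into the 'cat'
# field of the ColumnDataSource, which the patches glyph uses for its fill colour,
# so the saved HTML works without a running Bokeh server.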
layout3 = column(p3, select3)
save(layout3)
#############################################
### Make html copy without date in filename
bokeh_subregion_html1 = os.path.join(os.path.split(bokeh_gw_cwms_html)[0], param.base_gw_cwms_html)
shutil.copy(bokeh_gw_cwms_html, bokeh_subregion_html1)
#############################################
#### Print where results are saved
#print('########################')
#
#print('shapefile results were saved here: ' + os.path.join(param.base_dir, param.input_dir, param.gw_sites_ts_shp))
#print('csv results were saved here: ' + os.path.join(param.base_dir, param.input_dir, param.gw_sites_ts_csv))
#print('The plot was saved here: ' + os.path.join(param.base_dir, param.input_dir, param.today_gw_cwms_html))
|
py | 7dfbb3b1fe7443d59638d6146e8547feef963416 | # Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from flexmock import flexmock_teardown
from tests.dell10g import enable, configuring_vlan, \
assert_running_config_contains_in_order, ssh_protocol_factory,\
telnet_protocol_factory, add_vlan, configuring
from tests.util.protocol_util import with_protocol
class Dell10GConfigureVlanTest(unittest.TestCase):
__test__ = False
protocol_factory = None
def setUp(self):
self.protocol = self.protocol_factory()
def tearDown(self):
flexmock_teardown()
@with_protocol
def test_configuring_a_vlan(self, t):
enable(t)
add_vlan(t, 1000)
add_vlan(t, 1001)
add_vlan(t, 2000)
configuring_vlan(t, 2000, do="name shizzle")
assert_running_config_contains_in_order(t, [
"vlan 2000",
"name shizzle",
"exit",
"vlan 1,1000-1001",
"exit",
])
configuring(t, do="no vlan 1000")
configuring(t, do="no vlan 1001")
configuring(t, do="no vlan 2000")
class Dell10GConfigureVlanSshTest(Dell10GConfigureVlanTest):
__test__ = True
protocol_factory = ssh_protocol_factory
class Dell10GConfigureVlanTelnetTest(Dell10GConfigureVlanTest):
__test__ = True
protocol_factory = telnet_protocol_factory
|
py | 7dfbb5fe415628140d2db3296f60204d275a1e81 | #!/usr/bin/env python3
# -*-coding:utf-8-*-
import os
import sys
import argparse
import threading
import re
from copy import deepcopy
class MUltiLoader():
""" Multi downloader for esp32 nvs partition """
DOWNLOAD_SUCESS_STRING = "Hard resetting via RTS pin..."
ERASE_SUCCESS_STRING = "Chip erase completed successfully"
RE_MAC = r"MAC: (([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2})"
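    # RE_MAC matches the "MAC: xx:xx:xx:xx:xx:xx" line printed by esptool's
    # read_mac command; the colons are later replaced with '-' so the address can
    # be matched against the nvs bin file names, which are assumed to contain it.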
def __init__(self, ports, baudrate, tools, nvs_addr, bin_dir, erase_flag):
self._ports = ports
self._baudrate = baudrate
self._nvs_addr = nvs_addr
self._tools = tools
self._dir = bin_dir
self._erase_flag = erase_flag
self.re_mac = re.compile(self.RE_MAC)
def _select_port(self):
if 'all' in self._ports:
dev_list = os.listdir('/dev')
for dev in dev_list:
if dev.find('ttyUSB') != -1:
yield f"/dev/{dev}"
else:
for i in self._ports:
yield f"/dev/ttyUSB{i}"
def _get_nvsbin_lists(self):
files = os.listdir(self._dir)
files_bak = deepcopy(files)
for i in files_bak:
if i[-4:] != '.bin':
files.remove(i)
return [os.path.join(self._dir,file) for file in files]
def _download_performance(self, command):
loader = os.popen(command)
ret_string = loader.read()
if ret_string.find(self.DOWNLOAD_SUCESS_STRING) != -1:
print(f"{command} success")
elif ret_string.find(self.ERASE_SUCCESS_STRING) != -1:
print(f"{command} success")
else:
print(f"{command} failed")
loader.close()
def _erase(self, port):
command = f"{self._tools} -p {port} -b {self._baudrate} erase_flash"
print(f"start erase through {port}")
t = threading.Thread(target=self._download_performance, args=(command,))
t.start()
return t
def _download(self, port, files):
""" Read MAC address from device """
t = os.popen(f"{self._tools} -b 115200 -p {port} read_mac")
strings = t.read()
result = self.re_mac.search(strings)
mac = result.group(1).replace(':','-')
t.close()
""" Match MAC address and bin file """
file = str()
for item in files:
if item.find(mac) == -1:
continue
file = item
break
assert(len(file) != 0)
""" Download bin fine to device """
command = f"{self._tools} -p {port} -b {self._baudrate} write_flash {self._nvs_addr} {file}"
print(f"start download bin through {port}")
t = threading.Thread(
target=self._download_performance, args=(command,))
t.start()
return t
def run(self):
t_list = list()
if self._erase_flag:
for port in self._select_port():
t_list.append(self._erase(port))
for t in t_list:
t.join()
files = self._get_nvsbin_lists()
if len(files) == 0:
print("can not find bin file")
return False
for port in self._select_port():
t_list.append(self._download(port, files))
for t in t_list:
t.join()
def main():
parser = argparse.ArgumentParser(
description="ESP multi download tools for nvs partition bin")
parser.add_argument('-p', '--port', dest='ports', action='append', required=True, nargs='+',
help="uart port list, pass all for all uart port")
parser.add_argument('-b', '--baudrate', dest='baudrate', action='store', default='460800',
help="uart baudrate")
parser.add_argument('--erase', dest='erase', action='store_true', default=False,
help="erase before flash if this is given")
parser.add_argument('--addr', dest='addr', action='store', default='0x9000',
help="nvs address in partition table")
parser.add_argument('--dir', dest='dir', action='store', required=True,
help="nvs bin file dir")
parser.add_argument('--tools', dest='tools', action='store', required=True,
help="esptools path")
args = parser.parse_args()
ports = []
for i in args.ports:
ports += i
downloader = MUltiLoader(
ports=ports, baudrate=args.baudrate, erase_flag=args.erase, tools=args.tools, nvs_addr=args.addr, bin_dir=args.dir)
downloader.run()
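    # Hedged usage sketch (paths and port numbers are illustrative):
    #
    #     python this_script.py -p 0 1 --dir ./nvs_bins --tools ./esptool.py --erase
    #
    # or pass "-p all" to flash every /dev/ttyUSB* device that is found.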
if __name__ == '__main__':
if sys.version_info < (3, 6):
print('Error, need python version >= 3.6')
sys.exit()
try:
main()
except KeyboardInterrupt:
quit()
|
py | 7dfbb62c938f3ad685ad7956f7f949546a780bbd | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['DiagnosticLogger']
class DiagnosticLogger(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
diagnostic_id: Optional[pulumi.Input[str]] = None,
loggerid: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Logger details.
API Version: 2018-01-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] diagnostic_id: Diagnostic identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[str] loggerid: Logger identifier. Must be unique in the API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if diagnostic_id is None and not opts.urn:
raise TypeError("Missing required property 'diagnostic_id'")
__props__['diagnostic_id'] = diagnostic_id
__props__['loggerid'] = loggerid
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['credentials'] = None
__props__['description'] = None
__props__['is_buffered'] = None
__props__['logger_type'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:DiagnosticLogger"), pulumi.Alias(type_="azure-native:apimanagement/latest:DiagnosticLogger"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:DiagnosticLogger"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:DiagnosticLogger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:DiagnosticLogger"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:DiagnosticLogger"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:DiagnosticLogger")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DiagnosticLogger, __self__).__init__(
'azure-native:apimanagement:DiagnosticLogger',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DiagnosticLogger':
"""
Get an existing DiagnosticLogger resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["credentials"] = None
__props__["description"] = None
__props__["is_buffered"] = None
__props__["logger_type"] = None
__props__["name"] = None
__props__["type"] = None
return DiagnosticLogger(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def credentials(self) -> pulumi.Output[Mapping[str, str]]:
"""
The name and SendRule connection string of the event hub for azureEventHub logger.
Instrumentation key for applicationInsights logger.
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Logger description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="isBuffered")
def is_buffered(self) -> pulumi.Output[Optional[bool]]:
"""
Whether records are buffered in the logger before publishing. Default is assumed to be true.
"""
return pulumi.get(self, "is_buffered")
@property
@pulumi.getter(name="loggerType")
def logger_type(self) -> pulumi.Output[str]:
"""
Logger type.
"""
return pulumi.get(self, "logger_type")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7dfbb6515e5ee928e606da239d4e225fb349c2eb | from __future__ import print_function#, unicode_literals
import maryclient
import codecs
import common_utils
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Retrieves pronunciation entries of arbitrary German words (using the TTS software Mary) for a whole word list.')
parser.add_argument('-i', '--inputfile', dest='inputfile',
help='Process this word list (one per line, utf-8)', type=str, default='')
parser.add_argument('-o', '--outputfile', dest='outputfile',
                        help='Export pronunciation entries to this output file (one per line, utf-8)', type=str, default='')
args = parser.parse_args()
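    # Hedged usage sketch (script and file names are illustrative); a reachable
    # MARY TTS server is assumed via maryclient's defaults:
    #
    #     python build_lexicon.py -i wordlist.txt -o lexicon.txt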
mary = maryclient.maryclient()
dictionary = {}
with codecs.open(args.inputfile, 'r', 'utf-8') as inputfile:
for word in inputfile:
tokens, phonems = common_utils.getCleanTokensAndPhonemes(
word, mary)
if len(phonems) != 1:
print(
                    'Warning: MARY split this word into more than one token:', word, phonems)
dictionary[word[:-1]] = ''.join(phonems[0])
with codecs.open(args.outputfile, 'w', 'utf-8') as outputfile:
for word in sorted(dictionary):
outputfile.write(word+' '+dictionary[word]+'\n')
|
py | 7dfbb78383741ad4db838a18a3ac6bf9983b493c | from django import forms
from django.core.exceptions import ValidationError
from django.db import models
from base.models_i.chat import ChatModel
class StateTaskManager(models.Manager):
def get_task(self,id,stream,user):
task =self.get(pk = id)
        if task.stream.pk != stream['id'] and stream['id'] != 0:
raise ValidationError
task.asignet_to = user
task.status = task.STATUS[1][0]
task.save()
return task
def chenge_status(self,task,user,status):
if task.status == status:
return
        if user == task.asignet_to or user == task.autors or user.is_superuser:
task.status = status
task.chenge_user = user
task.save()
class DetailTaskManager(models.Manager):
def get_task_form(self,user,task):
        # Return a form variant depending on whether the user is the task's author or its assignee; otherwise None.
asignet_to = task.asignet_to
autor = task.autors
if user == autor:
return self.create_form("autor")
if user == asignet_to:
return self.create_form("asignet")
def create_form(self,choise):
# CHOICES = (('Option 2', 'Option 2'), ('Option 1', 'Option 1'),)
if choise == "autor":
CHOICES = (("O", "Открыта"), ("C", "Закрыто"))
elif choise == "asignet":
CHOICES = (("O", "Открыта"), ("S", "Решено"),)
else:
return None
class TicketForm(forms.Form):
mesenge = forms.CharField(widget=forms.Textarea, label="Сообщения")
status = forms.ChoiceField(choices=CHOICES, label="Статус")
return TicketForm
def create_row_insert(self,title,body,class_css="inline-block"):
# obj = {"title": "Content", "class": "inline-block", "body": "test fasf asf ", }
obj = {}
obj["title"] = title
obj["class"] = class_css
obj["body"] = body
return obj
def get_task(self,task):
# obj = {
# "title": "1212. Test Task [Open]",
# "rows" : [
# {"title":"Content","class": "inline-block","body": "test fasf asf ",},
# {"title":"Content2","class": "inline-block","body": "test fasf asf ",},
# {"title":"Content3","class": "inline-block","body": "test fasf asf ",},
# {"title":"Content3","class": "char-block","body": "test fasf asfasd as dd sfsdf ",},
# {"title":"Content3","class": "file-fields-block","body": "test fasf asfasd as dd sfsdf ","is_file":True,
# "files":[
# {"name":"f1","url":"#"},
# {"name":"f2","url":"#"},
# {"name":"f3","url":"#"},
# ]},
# ]
# }
obj = {}
obj['title'] = "[#{}] {} [{}]".format(task.pk,task.title,task.get_status())
inline_list = [
"stream",
"autors",
"created_at",
"updated_at",
"date_due",
]
        if task.asignet_to is not None:
inline_list.insert(2,"asignet_to")
char_block_list = [
"description",
]
        if task.file is not None:
file = {"title":getattr(task.__class__,"file",).field.verbose_name.capitalize(),"class": "file-fields-block","body": "","is_file":True,
"files":[
{"name":task.file,"url":task.file.url()},
# {"name":"f2","url":"#"},
# {"name":"f3","url":"#"},
]}
else:
file = {}
inline_row_list = [ self.create_row_insert(title=getattr(task.__class__,i,).field.verbose_name.capitalize(),body=getattr(task,i)) for i in inline_list]
char_row_list = [ self.create_row_insert(title=getattr(task.__class__,i,).field.verbose_name.capitalize(),body=getattr(task,i),class_css="char-block") for i in char_block_list]
obj["rows"] = inline_row_list + char_row_list + [file]
return obj
def get_chat(self,task,user):
obj_list = []
asignet_to = task.asignet_to
autor = task.autors
if user != autor and asignet_to !=user or asignet_to == None:
return {}
chats = ChatModel.obj.filter(task=task)
for messenge in chats:
obj = {}
if messenge.user == autor:
obj["color"] ="red"
elif messenge.user == asignet_to:
obj["color"] = "blue"
obj["user"] = messenge.user.__str__()
obj["body"] = messenge.mesenge
obj["date"] = messenge.updated_at
obj_list.append(obj)
return {"is_chat":True,"obj":obj_list,}
class CustomManegerTaskSelf(models.Manager):
## return stat +!
## return is_avtor Task +
## return is use for stream
## return is use for asignet
def all_task_for_user(self,user):
task_q_asignet_to = self.filter(asignet_to = user)
task_q_autors = self.filter(autors = user)
task_q_summ = self.none()
task_q_summ |= task_q_autors | task_q_asignet_to
return task_q_summ
def all_chat_for_user(self,user):
task = self.all_task_for_user(user)
chat_id = ChatModel.obj.filter(task__in =task)
return chat_id
def _stat(self,name,all,list):
obj = {
"title":name,
"all":all,
}
obj["stats"] = list
return obj
def _stats_d(self,f,ques):
count = ques.count()
f["all"] = count
f_valid = []
for stat in f["filter_l"]:
c = ques.filter(status=stat["status"]).count()
name = stat["name"]
f_valid.append({
"name":name,
"data":c
})
del f["filter_l"]
f["stats"] = f_valid
return f
def stats(self,user):
data = []
task_q_asignet_to = self.filter(autors=user)
f = {"filter_l": [
{"name": "Ожидает",
"status": "W"
},
{"name": "Ваполняется",
"status": "O"
},
{"name": "Закрыто",
"status": "C"
},
],
"title": "Созданные вами заявки",
"all": 12,
}
data.append( self._stats_d(f,task_q_asignet_to))
task_q_asignet_to = self.filter(asignet_to=user)
f = {"filter_l": [
{"name": "Ваполняется",
"status": "O"
},
{"name": "Решено",
"status": "S"
},
{"name": "Закрыто",
"status": "C"
},
],
"title": "Выполняемые вами заявки",
"all": 12,
}
data.append( self._stats_d(f,task_q_asignet_to))
return data
def is_avtor(self,user):
return self.filter(autors=user)
|
py | 7dfbb807a4c0328250f182aa16b93c26469a94d0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Contains Mapping values for Servers in ODE VPC
Info on the python library used to generate the script
https://github.com/cloudtools/troposphere
"""
from troposphere import constants
logicalName = 'logicalName'
mapping = 'mapping'
PV64 = 'PV64'
HVM64 = 'HVM64'
HVMG2 = 'HVMG2' # GPU Instance
PV64_MAP = {'Arch': PV64}
HVM64_MAP = {'Arch': HVM64}
HVMG2_MAP = {'Arch': HVMG2} # GPU Instance
AWSInstanceType2Arch = {
logicalName :'AWSInstanceType2Arch',
mapping : {
't1.micro': PV64_MAP,
't2.micro': HVM64_MAP,
't2.small': HVM64_MAP,
't2.medium': HVM64_MAP,
'm1.small': PV64_MAP,
'm1.medium': PV64_MAP,
'm1.large': PV64_MAP,
'm1.xlarge': PV64_MAP,
'm2.xlarge': PV64_MAP,
'm2.2xlarge': PV64_MAP,
'm2.4xlarge': PV64_MAP,
'm3.medium': HVM64_MAP,
'm3.large': HVM64_MAP,
'm3.xlarge': HVM64_MAP,
'm3.2xlarge': HVM64_MAP,
'c1.medium': PV64_MAP,
'c1.xlarge': PV64_MAP,
'c3.large': HVM64_MAP,
'c3.xlarge': HVM64_MAP,
'c3.2xlarge': HVM64_MAP,
'c3.4xlarge': HVM64_MAP,
'c3.8xlarge': HVM64_MAP,
'g2.2xlarge': HVMG2_MAP,
'r3.large': HVM64_MAP,
'r3.xlarge': HVM64_MAP,
'r3.2xlarge': HVM64_MAP,
'r3.4xlarge': HVM64_MAP,
'r3.8xlarge': HVM64_MAP,
'i2.xlarge': HVM64_MAP,
'i2.2xlarge': HVM64_MAP,
'i2.4xlarge': HVM64_MAP,
'i2.8xlarge': HVM64_MAP,
'hi1.4xlarge': HVM64_MAP,
'hs1.8xlarge': HVM64_MAP,
'cr1.8xlarge': HVM64_MAP,
'cc2.8xlarge': HVM64_MAP,
}}
"""
Liferay powered by Bitnami (HVM)
OS Linux/Unix, Ubuntu 14.04
https://aws.amazon.com/marketplace/pp/B00NPHLT1E
"""
AWSRegionArch2AMI = {
logicalName: 'AWSRegionArch2AMI',
mapping : {
constants.US_EAST_1: {HVM64: 'ami-a0e8eec8'}, # Private AMI Image #'ami-4e90d426', # Market Place Liferay Public bitnami ubuntu
constants.US_WEST_2: {HVM64: 'ami-6df5ac5d'}, # market place image
constants.US_WEST_1: {HVM64: 'ami-088b924d'} # market place image
}}
"""
CentOS 7 x86_64 (2014_09_29) EBS HVM-b7ee8a69-ee97-4a49-9e68-afaee216db2e-ami-d2a117ba.2
"""
centos_7_AWSRegionArch2AMI = {
logicalName: 'centos7AWSRegionArch2AMI',
mapping: {
constants.US_EAST_1: {HVM64: "ami-96a818fe"},
constants.US_WEST_1: {HVM64: "ami-6bcfc42e"},
constants.US_WEST_2: {HVM64: "ami-c7d092f7"}
}}
"""
Ambari Server and Hadoop Cluster Images
OS: CentOS 6 (x86_64) - with Updates HVM
AWS: https://aws.amazon.com/marketplace/pp/B00NQAYLWO
CentOS 6 x86_64 (2014_09_29) EBS HVM
CentOS 6 x86_64 (2014_09_29) EBS HVM-74e73035-3435-48d6-88e0-89cc02ad83ee-ami-a8a117c0.2
"""
centos_65_AWSRegionArch2AMI = {
logicalName: 'centos65AWSRegionArch2AMI',
mapping : {
constants.US_EAST_1: {HVM64: 'ami-c2a818aa'}, # market place image
constants.US_WEST_1: {HVM64: 'ami-57cfc412'}, # market place image
constants.US_WEST_2: {HVM64: 'ami-81d092b1'} # market place image
}}
"""
Ubuntu 14.04 Trusty 64 hvm-ssd instances
ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20150325
http://cloud-images.ubuntu.com/locator/
"""
ubuntu_14_AWSRegionArch2AMI = {
logicalName: 'ubuntu14AWSRegionArch2AMI',
mapping : {
constants.US_EAST_1: {HVM64: 'ami-d05e75b8'},
constants.US_WEST_1: {HVM64: 'ami-df6a8b9b'},
constants.US_WEST_2: {HVM64: 'ami-5189a661'}
}}
"""
Ubuntu Server 12.04 LTS (HVM)
ubuntu/images/hvm-ssd/ubuntu-precise-12.04-amd64-server-20150127-f4f523b3-d6b3-42a4-82e8-5f264cf4cf91-ami-f2bbff9a.2
http://cloud-images.ubuntu.com/locator/
"""
ubuntu_12_AWSRegionArch2AMI = {
logicalName: 'ubuntu12AWSRegionArch2AMI',
mapping : {
constants.US_EAST_1: {HVM64: 'ami-427a392a'},
constants.US_WEST_1: {HVM64: 'ami-82bba3c7'},
constants.US_WEST_2: {HVM64: 'ami-2b471c1b'}
}}
"""
aws ec2 describe-images --region us-xxxx-n --filter "Name=name,Values=amzn-ami-vpc-nat-hvm-2015*"
--query Images[*].{Name:Name,Arch:Architecture,Description:Description,Id:ImageId,CreationDate:CreationDate,RootVolumeType:RootDeviceType}
Amazon Linux AMI VPC NAT x86_64 HVM
amzn-ami-vpc-nat-hvm-2015.03.0.x86_64-ebs
"""
ami_nat_instanceAWSRegionArch2AMI = {
logicalName: 'amazonNATInstance',
mapping :{
constants.US_EAST_1: {HVM64: 'ami-b0210ed8'},
constants.US_WEST_1: {HVM64: 'ami-ada746e9'},
constants.US_WEST_2: {HVM64: 'ami-75ae8245'}
}}
"""
Instance Mapping Template
AWSRegionArch2AMI = {
logicalName: 'AWSRegionArch2AMI',
mapping : {
'us-east-1': { 'PV64': None,
'HVM64': None,
'HVMG2': None'},
'us-west-2': {'PV64': None,
'HVM64': None,
'HVMG2': None},
'us-west-1': {'PV64': None,
'HVM64': None,
'HVMG2': None},
# 'eu-west-1': {'PV64': 'ami-aa8f28dd',
# 'HVM64': 'ami-748e2903',
# 'HVMG2': 'ami-00913777'},
# 'ap-southeast-1': {'PV64': 'ami-20e1c572',
# 'HVM64': 'ami-d6e1c584',
# 'HVMG2': 'ami-fabe9aa8'},
# 'ap-northeast-1': {'PV64': 'ami-21072820',
# 'HVM64': 'ami-35072834',
# 'HVMG2': 'ami-5dd1ff5c'},
# 'ap-southeast-2': {'PV64': 'ami-8b4724b1',
# 'HVM64': 'ami-fd4724c7',
# 'HVMG2': 'ami-e98ae9d3'},
# 'sa-east-1': {'PV64': 'ami-9d6cc680',
# 'HVM64': 'ami-956cc688',
# 'HVMG2': 'NOT_SUPPORTED'},
# 'cn-north-1': {'PV64': 'ami-a857c591',
# 'HVM64': 'ami-ac57c595',
# 'HVMG2': 'NOT_SUPPORTED'},
# 'eu-central-1': {'PV64': 'ami-a03503bd',
# 'HVM64': 'ami-b43503a9',
# 'HVMG2': 'ami-b03503ad'},
}}
""" |
py | 7dfbb95c2d82ec88bcd771cbea31ed30a901f5c7 | # Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Toy MLP model."""
from typing import Any
from flax import linen as nn
import jax.numpy as jnp
class MlpModel(nn.Module):
info: Any
@nn.compact
def __call__(self, x):
x = x['tokens']
# x.shape: batch_size, length
batch_size = x.shape[0]
x = nn.Embed(num_embeddings=30000, features=128)(x[:, :30])
# x.shape: batch_size, 30, 128
x = nn.Dense(features=30)(x)
# x.shape: batch_size, 30, 30
x = jnp.reshape(x, (batch_size, -1))
# x.shape: batch_size, 900
x = nn.relu(x)
x = nn.Dense(features=30)(x)
# x.shape: batch_size, 30
x = nn.relu(x)
x = nn.Dense(features=self.info.num_classes)(x)
# x.shape: batch_size, num_classes
return x, None
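# Hedged usage sketch (batch shape and PRNG seed are assumptions, not part of this
# module); `info` must expose `num_classes`:
#
#     import jax
#     model = MlpModel(info=info)
#     dummy = {'tokens': jnp.zeros((8, 30), jnp.int32)}
#     variables = model.init(jax.random.PRNGKey(0), dummy)
#     logits, _ = model.apply(variables, dummy)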
|
py | 7dfbb964167e60a9f298617968742a92642d0257 | """Support for exposing regular REST commands as services."""
import asyncio
import logging
import aiohttp
from aiohttp import hdrs
import voluptuous as vol
from homeassistant.const import (
CONF_HEADERS,
CONF_METHOD,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_TIMEOUT,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
DOMAIN = "rest_command"
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
DEFAULT_METHOD = "get"
DEFAULT_VERIFY_SSL = True
SUPPORT_REST_METHODS = ["get", "patch", "post", "put", "delete"]
CONF_CONTENT_TYPE = "content_type"
COMMAND_SCHEMA = vol.Schema(
{
vol.Required(CONF_URL): cv.template,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.All(
vol.Lower, vol.In(SUPPORT_REST_METHODS)
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.template}),
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_PAYLOAD): cv.template,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
vol.Optional(CONF_CONTENT_TYPE): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(COMMAND_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
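# Hedged configuration sketch (URL and command name are placeholders): an entry in
# configuration.yaml matching COMMAND_SCHEMA looks roughly like
#
#   rest_command:
#     example_request:
#       url: "https://example.com/api/endpoint"
#       method: post
#       content_type: "application/json; charset=utf-8"
#       payload: '{"state": "on"}'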
async def async_setup(hass, config):
"""Set up the REST command component."""
def async_register_rest_command(name, command_config):
"""Create service for rest command."""
websession = async_get_clientsession(hass, command_config.get(CONF_VERIFY_SSL))
timeout = command_config[CONF_TIMEOUT]
method = command_config[CONF_METHOD]
template_url = command_config[CONF_URL]
template_url.hass = hass
auth = None
if CONF_USERNAME in command_config:
username = command_config[CONF_USERNAME]
password = command_config.get(CONF_PASSWORD, "")
auth = aiohttp.BasicAuth(username, password=password)
template_payload = None
if CONF_PAYLOAD in command_config:
template_payload = command_config[CONF_PAYLOAD]
template_payload.hass = hass
template_headers = None
if CONF_HEADERS in command_config:
template_headers = command_config[CONF_HEADERS]
for template_header in template_headers.values():
template_header.hass = hass
content_type = None
if CONF_CONTENT_TYPE in command_config:
content_type = command_config[CONF_CONTENT_TYPE]
async def async_service_handler(service):
"""Execute a shell command service."""
payload = None
if template_payload:
payload = bytes(
template_payload.async_render(variables=service.data), "utf-8"
)
request_url = template_url.async_render(variables=service.data)
headers = None
if template_headers:
headers = {}
for header_name, template_header in template_headers.items():
headers[header_name] = template_header.async_render(
variables=service.data
)
if content_type:
if headers is None:
headers = {}
headers[hdrs.CONTENT_TYPE] = content_type
try:
async with getattr(websession, method)(
request_url,
data=payload,
auth=auth,
headers=headers,
timeout=timeout,
) as response:
if response.status < 400:
_LOGGER.debug(
"Success. Url: %s. Status code: %d.",
response.url,
response.status,
)
else:
_LOGGER.warning(
"Error. Url: %s. Status code %d.",
response.url,
response.status,
)
except asyncio.TimeoutError:
_LOGGER.warning("Timeout call %s.", response.url, exc_info=1)
except aiohttp.ClientError:
_LOGGER.error("Client error %s.", request_url, exc_info=1)
# register services
hass.services.async_register(DOMAIN, name, async_service_handler)
for command, command_config in config[DOMAIN].items():
async_register_rest_command(command, command_config)
return True
|
py | 7dfbb9c1d60022304e9568947e149e62dd407d7d | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove all this when cleaning-up the cscc api code
# pylint: disable=invalid-string-quote,missing-return-doc,missing-return-type-doc
# pylint: disable=trailing-whitespace
"""Base GCP client which uses the discovery API."""
import json
import logging
import threading
import google_auth_httplib2
import googleapiclient
from googleapiclient.http import set_user_agent
from googleapiclient import discovery
import httplib2
from ratelimiter import RateLimiter
from retrying import retry
import google.auth
from google.auth.credentials import with_scopes_if_required
from google.cloud import forseti as forseti_security
from google.cloud.forseti.common.gcp_api import _supported_apis
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import replay
from google.cloud.forseti.common.util import retryable_exceptions
import google.oauth2.credentials
CLOUD_SCOPES = frozenset(['https://www.googleapis.com/auth/cloud-platform'])
# Per request max wait timeout.
HTTP_REQUEST_TIMEOUT = 30.0
# Per thread storage.
LOCAL_THREAD = threading.local()
LOGGER = logger.get_logger(__name__)
# Default value num_retries within HttpRequest execute method
NUM_HTTP_RETRIES = 5
# Support older versions of apiclient without cache support
SUPPORT_DISCOVERY_CACHE = (googleapiclient.__version__ >= '1.4.2')
# Used by the record and replay decorator to store requests across all repos.
REQUEST_RECORDER = dict()
REQUEST_REPLAYER = dict()
# Used for private APIs that need to be created from local discovery documents
BASE_DIR = "google/cloud/forseti/common/gcp_api/discovery_documents/"
PRIVATE_APIS = {
"securitycenter": BASE_DIR + "securitycenter.json"
}
@retry(retry_on_exception=retryable_exceptions.is_retryable_exception,
wait_exponential_multiplier=1000, wait_exponential_max=10000,
stop_max_attempt_number=5)
def _create_service_api(credentials, service_name, version, developer_key=None,
cache_discovery=False):
"""Builds and returns a cloud API service object.
Args:
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
service_name (str): The name of the API.
version (str): The version of the API to use.
developer_key (str): The api key to use to determine the project
associated with the API call, most API services do not require
this to be set.
cache_discovery (bool): Whether or not to cache the discovery doc.
Returns:
object: A Resource object with methods for interacting with the service.
"""
# The default logging of the discovery obj is very noisy in recent versions.
# Lower the default logging level of just this module to WARNING unless
# debug is enabled.
if LOGGER.getEffectiveLevel() > logging.DEBUG:
logging.getLogger(discovery.__name__).setLevel(logging.WARNING)
discovery_kwargs = {
'serviceName': service_name,
'version': version,
'developerKey': developer_key,
'credentials': credentials}
if SUPPORT_DISCOVERY_CACHE:
discovery_kwargs['cache_discovery'] = cache_discovery
# Used for private APIs that are built from a local discovery file
if service_name in PRIVATE_APIS:
return _build_from_document(
credentials,
PRIVATE_APIS[service_name]
)
return discovery.build(**discovery_kwargs)
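# Illustrative call (hypothetical values, added for clarity): building the
# Compute Engine client through the discovery service would look roughly like:
#
#   compute = _create_service_api(credentials, 'compute', 'v1')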
def _build_from_document(credentials, document_path):
"""Builds an API client from a local discovery document
Args:
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
document_path (str): The local path of the discovery document
"""
with open(document_path, 'r') as f:
discovery_data = json.load(f)
return discovery.build_from_document(
service=discovery_data,
credentials=credentials
)
def _build_http(http=None):
"""Set custom Forseti user agent and timeouts on a new http object.
Args:
http (object): An instance of httplib2.Http, or compatible, used for
testing.
Returns:
httplib2.Http: An http object with the forseti user agent set.
"""
if not http:
http = httplib2.Http(timeout=HTTP_REQUEST_TIMEOUT)
user_agent = 'Python-httplib2/{} (gzip), {}/{}'.format(
httplib2.__version__,
forseti_security.__package_name__,
forseti_security.__version__)
return set_user_agent(http, user_agent)
class BaseRepositoryClient(object):
"""Base class for API repository for a specified Cloud API."""
def __init__(self,
api_name,
versions=None,
credentials=None,
quota_max_calls=None,
quota_period=None,
use_rate_limiter=False,
**kwargs):
"""Constructor.
Args:
api_name (str): The API name to wrap. More details here:
https://developers.google.com/api-client-library/python/apis/
versions (list): A list of version strings to initialize.
credentials (object): GoogleCredentials.
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
**kwargs (dict): Additional args such as version.
"""
self._use_cached_http = False
if not credentials:
# Only share the http object when using the default credentials.
self._use_cached_http = True
credentials, _ = google.auth.default()
self._credentials = with_scopes_if_required(credentials,
list(CLOUD_SCOPES))
# Lock may be acquired multiple times in the same thread.
self._repository_lock = threading.RLock()
if use_rate_limiter:
self._rate_limiter = RateLimiter(max_calls=quota_max_calls,
period=quota_period)
else:
self._rate_limiter = None
self.name = api_name
# Look to see if the API is formally supported in Forseti.
supported_api = _supported_apis.SUPPORTED_APIS.get(api_name)
if not supported_api:
LOGGER.warn('API "%s" is not formally supported in Forseti, '
'proceed at your own risk.', api_name)
# See if the version is supported by Forseti.
# If no version is specified, use the supported API's default version.
if not versions and supported_api:
versions = [supported_api.get('default_version')]
self.versions = versions
if supported_api:
for version in versions:
if version not in supported_api.get('supported_versions', []):
LOGGER.warn('API "%s" version %s is not formally supported '
'in Forseti, proceed at your own risk.',
api_name, version)
self.gcp_services = {}
for version in versions:
self.gcp_services[version] = _create_service_api(
self._credentials,
self.name,
version,
kwargs.get('developer_key'),
kwargs.get('cache_discovery', False))
def __repr__(self):
"""The object representation.
Returns:
str: The object representation.
"""
return 'API: name=%s, versions=%s' % (self.name, self.versions)
def _init_repository(self, repository_class, version=None):
"""Safely initialize a repository class to a property.
Args:
repository_class (class): The class to initialize.
version (str): The gcp service version for the repository.
Returns:
object: An instance of repository_class.
"""
if not version:
# Use either the default version if defined or the first version
# returned when sorted by name.
version = (
_supported_apis.SUPPORTED_APIS.get(self.name, {})
.get('default_version'))
if not version or version not in self.gcp_services:
version = sorted(self.gcp_services.keys())[0]
with self._repository_lock:
return repository_class(gcp_service=self.gcp_services[version],
credentials=self._credentials,
rate_limiter=self._rate_limiter,
use_cached_http=self._use_cached_http)
# pylint: disable=too-many-instance-attributes, too-many-arguments
class GCPRepository(object):
"""Base class for GCP APIs."""
def __init__(self, gcp_service, credentials, component,
num_retries=NUM_HTTP_RETRIES, key_field='project',
entity_field=None, list_key_field=None, get_key_field=None,
max_results_field='maxResults', search_query_field='query',
rate_limiter=None, use_cached_http=True):
"""Constructor.
Args:
gcp_service (object): A Resource object with methods for interacting
with the service.
credentials (OAuth2Credentials): A Credentials object
component (str): The subcomponent of the gcp service for this
repository instance. E.g. 'instances' for compute.instances().*
APIs
num_retries (int): The number of http retriable errors to retry on
before hard failing.
key_field (str): The field name representing the project to
query in the API.
entity_field (str): The API entity returned generally by the .get()
api. E.g. 'instance' for compute.instances().get()
list_key_field (str): Optional override of key field for calls to
list methods.
get_key_field (str): Optional override of key field for calls to
get methods.
max_results_field (str): The field name that represents the maximum
number of results to return in one page.
search_query_field (str): The field name used to filter search
results.
rate_limiter (object): A RateLimiter object to manage API quota.
use_cached_http (bool): If set to true, calls to the API will use
a thread local shared http object. When false a new http object
is used for each request.
"""
self.gcp_service = gcp_service
self._credentials = credentials
components = component.split('.')
self._component = getattr(
self.gcp_service, components.pop(0))()
for nested_component in components:
self._component = getattr(
self._component, nested_component)()
self._entity_field = entity_field
self._num_retries = num_retries
if list_key_field:
self._list_key_field = list_key_field
else:
self._list_key_field = key_field
if get_key_field:
self._get_key_field = get_key_field
else:
self._get_key_field = key_field
self._max_results_field = max_results_field
self._search_query_field = search_query_field
self._rate_limiter = rate_limiter
self._use_cached_http = use_cached_http
self._local = LOCAL_THREAD
@property
def http(self):
"""A thread local instance of httplib2.Http.
Returns:
google_auth_httplib2.AuthorizedHttp: An Http instance authorized by
the credentials.
"""
if self._use_cached_http and hasattr(self._local, 'http'):
return self._local.http
authorized_http = google_auth_httplib2.AuthorizedHttp(
self._credentials, http=_build_http())
if self._use_cached_http:
self._local.http = authorized_http
return authorized_http
def _build_request(self, verb, verb_arguments):
"""Builds HttpRequest object.
Args:
verb (str): Request verb (ex. insert, update, delete).
verb_arguments (dict): Arguments to be passed with the request.
Returns:
httplib2.HttpRequest: HttpRequest to be sent to the API.
"""
method = getattr(self._component, verb)
# Python insists that keys in **kwargs be strings (not variables).
# Since we initially build our kwargs as a dictionary where one of the
# keys is a variable (target), we need to convert keys to strings,
# even though the variable in question is of type str.
method_args = {str(k): v for k, v in verb_arguments.items()}
return method(**method_args)
def _build_next_request(self, verb, prior_request, prior_response):
"""Builds pagination-aware request object.
More details:
https://developers.google.com/api-client-library/python/guide/pagination
Args:
verb (str): Request verb (ex. insert, update, delete).
prior_request (httplib2.HttpRequest): Request that may trigger
paging.
prior_response (dict): Potentially partial response.
Returns:
httplib2.HttpRequest: HttpRequest or None. None is returned when
there is nothing more to fetch - request completed.
"""
method = getattr(self._component, verb + '_next')
return method(prior_request, prior_response)
def _request_supports_pagination(self, verb):
"""Determines if the API action supports pagination.
Args:
verb (str): Request verb (ex. insert, update, delete).
Returns:
bool: True when API supports pagination, False otherwise.
"""
return getattr(self._component, verb + '_next', None)
def execute_command(self, verb, verb_arguments):
"""Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
running commands on a separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to
_build_request.
Returns:
dict: An async operation Service Response.
"""
request = self._build_request(verb, verb_arguments)
request_submission_status = self._execute(request)
return request_submission_status
def execute_paged_query(self, verb, verb_arguments):
"""Executes query (ex. list) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to
_BuildRequest.
Yields:
dict: Service Response.
Raises:
PaginationNotSupportedError: When an API does not support paging.
"""
if not self._request_supports_pagination(verb=verb):
raise api_errors.PaginationNotSupportedError(
'{} does not support pagination'.format(verb))
request = self._build_request(verb, verb_arguments)
number_of_pages_processed = 0
while request is not None:
response = self._execute(request)
number_of_pages_processed += 1
LOGGER.debug('Executing paged request # %s',
number_of_pages_processed)
request = self._build_next_request(verb, request, response)
yield response
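# Illustrative usage of execute_paged_query (hypothetical repository instance
# and arguments, not part of the original module):
#
#   for page in repository.execute_paged_query(
#           verb='list', verb_arguments={'project': 'my-project-id'}):
#       for item in page.get('items', []):
#           handle(item)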
def execute_search_query(self, verb, verb_arguments):
"""Executes query (ex. search) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. search).
verb_arguments (dict): key-value pairs to be passed to
_BuildRequest.
Yields:
dict: Service Response.
"""
# Implementation of search does not follow the standard API pattern.
# Fields need to be in the body rather than sent separately.
next_page_token = None
number_of_pages_processed = 0
while True:
req_body = verb_arguments.get('body', dict())
if next_page_token:
req_body['pageToken'] = next_page_token
request = self._build_request(verb, verb_arguments)
response = self._execute(request)
number_of_pages_processed += 1
LOGGER.debug('Executing paged request # %s',
number_of_pages_processed)
next_page_token = response.get('nextPageToken')
yield response
if not next_page_token:
break
def execute_query(self, verb, verb_arguments):
"""Executes query (ex. get) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to
_BuildRequest.
Returns:
dict: Service Response.
"""
request = self._build_request(verb, verb_arguments)
return self._execute(request)
@replay.replay(REQUEST_REPLAYER)
@replay.record(REQUEST_RECORDER)
@retry(retry_on_exception=retryable_exceptions.is_retryable_exception,
wait_exponential_multiplier=1000, wait_exponential_max=10000,
stop_max_attempt_number=5)
def _execute(self, request):
"""Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
"""
if self._rate_limiter:
# Since the ratelimiter library only exposes a context manager
# interface the code has to be duplicated to handle the case where
# no rate limiter is defined.
with self._rate_limiter:
return request.execute(http=self.http,
num_retries=self._num_retries)
return request.execute(http=self.http,
num_retries=self._num_retries)
# pylint: enable=too-many-instance-attributes, too-many-arguments
|
py | 7dfbbab97766af783fcb182a87f48d89c95bea1d | #!/usr/bin/env python
class Solution:
def minSubArrayLen(self, s, nums) -> int:
head, tail, l = 0, 0, len(nums)
ret, partial = l+1, 0
while tail < l:
while tail < l and partial < s:
partial += nums[tail]
tail += 1
while partial >= s:
ret = min(tail-head, ret)
partial -= nums[head]
head += 1
print(f'head: {head-1}, tail: {tail-1}, ret: {ret}')
return 0 if ret == (l+1) else ret
nums = []
nums = [2,3,1,2,4,3]
s = 7
s = 8
s = 10
s = 100
nums = [1,2]
s = 3
sol = Solution()
print(sol.minSubArrayLen(s, nums))
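# Note (added for clarity): with the final values above (s = 3, nums = [1, 2])
# the call returns 2, since the whole array [1, 2] is the shortest subarray
# whose sum is >= 3; the per-iteration print shows the sliding-window state.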
|
py | 7dfbbad6f520e1632d80e56b79e45cbafa77abce | from unittest import TestCase
from jsonflat import JsonFlat
class JsonFlatTestCase(TestCase):
def setUp(self):
self.k = JsonFlat()
self.input_rows = [
{
'a': 1,
'b': 2,
'c': {
'd': 5,
'f': {
'g': 90,
'h': 67,
'i': [1, 2, 3]
},
'j': [
{'k': 37, 'l': 67},
{'k': 22, 'l': 111}
]
},
'e': 7,
'i': [1, 2, 3, 4]
},
{
'foo': 1,
'bar': {},
'c': 2,
'd': [],
'e': {'e1': []},
'f': [[{}]],
'g': [[[[]]]]
},
{},
[{}],
[[{}]]
]
self.output_rows = {
'field_names': ['a', 'b', 'c', 'c.d', 'c.f.g', 'c.f.h', 'c.f.i',
'c.j.k', 'c.j.l', 'e', 'e.e1', 'i', 'foo', 'bar',
'd', 'f', 'g'],
'rows': [
{'a': 1, 'b': 2, 'c.d': 5, 'c.f.g': 90, 'c.f.h': 67, 'e': 7,
'c.f.i': 1, 'c.j.k': 37, 'c.j.l': 67, 'i': 1},
{'a': 1, 'b': 2, 'c.d': 5, 'c.f.g': 90, 'c.f.h': 67, 'e': 7,
'c.f.i': 2, 'c.j.k': 22, 'c.j.l': 111, 'i': 2},
{'a': 1, 'b': 2, 'c.d': 5, 'c.f.g': 90, 'c.f.h': 67, 'e': 7,
'c.f.i': 3, 'i': 3},
{'a': 1, 'b': 2, 'c.d': 5, 'c.f.g': 90, 'c.f.h': 67, 'e': 7,
'i': 4},
{'foo': 1, 'bar': None, 'c': 2, 'd': None, 'e.e1': None,
'g': None, 'f': None},
{},
{},
{}
]
}
def test_a_flat(self):
# Empty list should return the same.
fields, rows = self.k._a_flat([])
self.assertEqual(len(fields), 0)
self.assertListEqual(rows, [])
# List in list. [[[[]]]] ==> [].
fields, rows = self.k._a_flat([[[[]]]])
self.assertEqual(len(fields), 0)
self.assertListEqual(rows, [])
# Dict in list. [{}] ==> [{}].
fields, rows = self.k._a_flat([{}])
self.assertEqual(len(fields), 0)
self.assertListEqual(rows, [{}])
# List of primitives as input.
fields, rows = self.k._a_flat([1, 2, 'hello'])
self.assertEqual(len(fields), 1)
self.assertListEqual(rows, [{self.k._root_element_name: 1},
{self.k._root_element_name: 2},
{self.k._root_element_name: 'hello'}])
def test_o_flat(self):
# Empty dict should return the same.
fields, row, lol = self.k._o_flat({})
self.assertEqual(len(fields), 0)
self.assertDictEqual(row, {})
self.assertListEqual(lol, [])
# Dict in dict. {'a': {}} ==> {'a': None}.
fields, row, lol = self.k._o_flat({'a': {}})
self.assertEqual(len(fields), 1)
self.assertDictEqual(row, {'a': None})
self.assertListEqual(lol, [])
# List in dict. {'a': []} ==> {'a': None}.
fields, row, lol = self.k._o_flat({'a': []})
self.assertEqual(len(fields), 1)
self.assertDictEqual(row, {'a': None})
self.assertListEqual(lol, [])
# Multiple lists in dict. {'a': [[[]]]} ==> {'a': None}.
fields, row, lol = self.k._o_flat({'a': [[[]]]})
self.assertEqual(len(fields), 1)
self.assertDictEqual(row, {'a': None})
self.assertListEqual(lol, [])
# Multiple lists and a dict in dict.
fields, row, lol = self.k._o_flat({'a': [[[{}]]]})
self.assertEqual(len(fields), 1)
self.assertDictEqual(row, {})
self.assertListEqual(lol, [[{'a': None}]])
def test_flatten(self):
# Primitives as the arguments to flatten
for primitive_arg in ['hello', 12, 2.3, True, None]:
self.assertDictEqual(
self.k.flatten(primitive_arg),
{'field_names': [self.k._root_element_name],
'rows': [{self.k._root_element_name: primitive_arg}]})
# Dict as the argument
self.assertDictEqual(self.k.flatten({'a': {'b': 2}}),
{'field_names': ['a.b'], 'rows': [{'a.b': 2}]})
# List as the argument
self.assertDictEqual(self.k.flatten([]),
{'field_names': [], 'rows': []})
self.assertDictEqual(self.k.flatten(self.input_rows),
self.output_rows)
|
py | 7dfbbb0d1d42cae91f60497a0238766e9ad319a8 | from time import time
t1 = time()
num_str = "0"
for i in range(1, 10**6 + 1):
num_str += str(i)
ans = 1
for i in range(7):
ans *= int(num_str[10**i])
print(ans)
print(f"Process completed in {time()-t1}s")
|
py | 7dfbbc006e697cb58fe117a98670ac9279eb646d | #!/usr/bin/env python3
# Copyright 2016, The Android Open Source Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Command-line tool for working with Android Verified Boot images."""
import argparse
import binascii
import bisect
import hashlib
import json
import math
import os
import struct
import subprocess
import sys
import tempfile
import time
# Keep in sync with libavb/avb_version.h.
AVB_VERSION_MAJOR = 1
AVB_VERSION_MINOR = 2
AVB_VERSION_SUB = 0
# Keep in sync with libavb/avb_footer.h.
AVB_FOOTER_VERSION_MAJOR = 1
AVB_FOOTER_VERSION_MINOR = 0
AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED = 1
# Configuration for enabling logging of calls to avbtool.
AVB_INVOCATION_LOGFILE = os.environ.get('AVB_INVOCATION_LOGFILE')
class AvbError(Exception):
"""Application-specific errors.
These errors represent issues for which a stack-trace should not be
presented.
Attributes:
message: Error message.
"""
def __init__(self, message):
Exception.__init__(self, message)
class Algorithm(object):
"""Contains details about an algorithm.
See the avb_vbmeta_image.h file for more details about algorithms.
The constant |ALGORITHMS| is a dictionary from human-readable
names (e.g 'SHA256_RSA2048') to instances of this class.
Attributes:
algorithm_type: Integer code corresponding to |AvbAlgorithmType|.
hash_name: Empty or a name from |hashlib.algorithms|.
hash_num_bytes: Number of bytes used to store the hash.
signature_num_bytes: Number of bytes used to store the signature.
public_key_num_bytes: Number of bytes used to store the public key.
padding: Padding used for signature as bytes, if any.
"""
def __init__(self, algorithm_type, hash_name, hash_num_bytes,
signature_num_bytes, public_key_num_bytes, padding):
self.algorithm_type = algorithm_type
self.hash_name = hash_name
self.hash_num_bytes = hash_num_bytes
self.signature_num_bytes = signature_num_bytes
self.public_key_num_bytes = public_key_num_bytes
self.padding = padding
# This must be kept in sync with the avb_crypto.h file.
#
# The PKCS1-v1.5 padding is a blob of binary DER-encoded ASN.1 and is
# obtained from section 5.2.2 of RFC 4880.
ALGORITHMS = {
'NONE': Algorithm(
algorithm_type=0, # AVB_ALGORITHM_TYPE_NONE
hash_name='',
hash_num_bytes=0,
signature_num_bytes=0,
public_key_num_bytes=0,
padding=b''),
'SHA256_RSA2048': Algorithm(
algorithm_type=1, # AVB_ALGORITHM_TYPE_SHA256_RSA2048
hash_name='sha256',
hash_num_bytes=32,
signature_num_bytes=256,
public_key_num_bytes=8 + 2*2048//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*202 + [0x00] + [
# ASN.1 header
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
0x00, 0x04, 0x20,
]))),
'SHA256_RSA4096': Algorithm(
algorithm_type=2, # AVB_ALGORITHM_TYPE_SHA256_RSA4096
hash_name='sha256',
hash_num_bytes=32,
signature_num_bytes=512,
public_key_num_bytes=8 + 2*4096//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*458 + [0x00] + [
# ASN.1 header
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
0x00, 0x04, 0x20,
]))),
'SHA256_RSA8192': Algorithm(
algorithm_type=3, # AVB_ALGORITHM_TYPE_SHA256_RSA8192
hash_name='sha256',
hash_num_bytes=32,
signature_num_bytes=1024,
public_key_num_bytes=8 + 2*8192//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*970 + [0x00] + [
# ASN.1 header
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
0x00, 0x04, 0x20,
]))),
'SHA512_RSA2048': Algorithm(
algorithm_type=4, # AVB_ALGORITHM_TYPE_SHA512_RSA2048
hash_name='sha512',
hash_num_bytes=64,
signature_num_bytes=256,
public_key_num_bytes=8 + 2*2048//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*170 + [0x00] + [
# ASN.1 header
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
0x00, 0x04, 0x40
]))),
'SHA512_RSA4096': Algorithm(
algorithm_type=5, # AVB_ALGORITHM_TYPE_SHA512_RSA4096
hash_name='sha512',
hash_num_bytes=64,
signature_num_bytes=512,
public_key_num_bytes=8 + 2*4096//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*426 + [0x00] + [
# ASN.1 header
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
0x00, 0x04, 0x40
]))),
'SHA512_RSA8192': Algorithm(
algorithm_type=6, # AVB_ALGORITHM_TYPE_SHA512_RSA8192
hash_name='sha512',
hash_num_bytes=64,
signature_num_bytes=1024,
public_key_num_bytes=8 + 2*8192//8,
padding=bytes(bytearray([
# PKCS1-v1_5 padding
0x00, 0x01] + [0xff]*938 + [0x00] + [
# ASN.1 header
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
0x00, 0x04, 0x40
]))),
}
def get_release_string():
"""Calculates the release string to use in the VBMeta struct."""
# Keep in sync with libavb/avb_version.c:avb_version_string().
return 'avbtool {}.{}.{}'.format(AVB_VERSION_MAJOR,
AVB_VERSION_MINOR,
AVB_VERSION_SUB)
def round_to_multiple(number, size):
"""Rounds a number up to nearest multiple of another number.
Arguments:
number: The number to round up.
size: The multiple to round up to.
Returns:
If |number| is a multiple of |size|, returns |number|, otherwise
returns |number| + |size|.
"""
remainder = number % size
if remainder == 0:
return number
return number + size - remainder
def round_to_pow2(number):
"""Rounds a number up to the next power of 2.
Arguments:
number: The number to round up.
Returns:
If |number| is already a power of 2 then |number| is
returned. Otherwise the smallest power of 2 greater than |number|
is returned.
"""
return 2**((number - 1).bit_length())
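# Illustrative sanity checks for the rounding helpers above (added for
# clarity, not part of the original tool):
#
#   round_to_multiple(4096, 4096) == 4096
#   round_to_multiple(4097, 4096) == 8192
#   round_to_pow2(4096) == 4096
#   round_to_pow2(4097) == 8192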
def encode_long(num_bits, value):
"""Encodes a long to a bytearray() using a given amount of bits.
This number is written big-endian, e.g. with the most significant
bit first.
This is the reverse of decode_long().
Arguments:
num_bits: The number of bits to write, e.g. 2048.
value: The value to write.
Returns:
A bytearray() with the encoded long.
"""
ret = bytearray()
for bit_pos in range(num_bits, 0, -8):
octet = (value >> (bit_pos - 8)) & 0xff
ret.extend(struct.pack('!B', octet))
return ret
def decode_long(blob):
"""Decodes a long from a bytearray() using a given amount of bits.
This number is expected to be in big-endian, e.g. with the most
significant bit first.
This is the reverse of encode_long().
Arguments:
blob: A bytearray() with the encoded long.
Returns:
The decoded value.
"""
ret = 0
for b in bytearray(blob):
ret *= 256
ret += b
return ret
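# Round-trip example for the helpers above (added for clarity, not part of
# the original tool): encoding 0x1234 in 16 bits yields b'\x12\x34', and
# decoding it restores the value:
#
#   decode_long(encode_long(16, 0x1234)) == 0x1234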
def egcd(a, b):
"""Calculate greatest common divisor of two numbers.
This implementation uses a recursive version of the extended
Euclidian algorithm.
Arguments:
a: First number.
b: Second number.
Returns:
A tuple (gcd, x, y) where |gcd| is the greatest common
divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|.
"""
if a == 0:
return (b, 0, 1)
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
"""Calculate modular multiplicative inverse of |a| modulo |m|.
This calculates the number |x| such that |a| * |x| == 1 (modulo
|m|). This number only exists if |a| and |m| are co-prime - |None|
is returned if this isn't true.
Arguments:
a: The number to calculate a modular inverse of.
m: The modulo to use.
Returns:
The modular multiplicative inverse of |a| and |m| or |None| if
these numbers are not co-prime.
"""
gcd, x, _ = egcd(a, m)
if gcd != 1:
return None # modular inverse does not exist
return x % m
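# Worked example for modinv() (added for clarity, not part of the original
# tool): modinv(3, 7) == 5, since 3 * 5 == 15 == 2 * 7 + 1, i.e.
# 3 * 5 == 1 (mod 7). For non-coprime inputs, e.g. modinv(4, 8), None is
# returned.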
def parse_number(string):
"""Parse a string as a number.
This is just a short-hand for int(string, 0) suitable for use in the
|type| parameter of |ArgumentParser|'s add_argument() function. An
improvement to just using type=int is that this function supports
numbers in other bases, e.g. "0x1234".
Arguments:
string: The string to parse.
Returns:
The parsed integer.
Raises:
ValueError: If the number could not be parsed.
"""
return int(string, 0)
class RSAPublicKey(object):
"""Data structure used for a RSA public key.
Attributes:
exponent: The key exponent.
modulus: The key modulus.
num_bits: The key size.
"""
MODULUS_PREFIX = b'modulus='
def __init__(self, key_path):
"""Loads and parses an RSA key from either a private or public key file.
Arguments:
key_path: The path to a key file.
Raises:
AvbError: If RSA key parameters could not be read from file.
"""
# We used to have something as simple as this:
#
# key = Crypto.PublicKey.RSA.importKey(open(key_path).read())
# self.exponent = key.e
# self.modulus = key.n
# self.num_bits = key.size() + 1
#
# but unfortunately PyCrypto is not available in the builder. So
# instead just parse openssl(1) output to get this
# information. It's ugly but...
args = ['openssl', 'rsa', '-in', key_path, '-modulus', '-noout']
p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate()
if p.wait() != 0:
# Could be just a public key is passed, try that.
args.append('-pubin')
p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate()
if p.wait() != 0:
raise AvbError('Error getting public key: {}'.format(perr))
if not pout.lower().startswith(self.MODULUS_PREFIX):
raise AvbError('Unexpected modulus output')
modulus_hexstr = pout[len(self.MODULUS_PREFIX):]
# The exponent is assumed to always be 65537 and the number of
# bits can be derived from the modulus by rounding up to the
# nearest power of 2.
self.key_path = key_path
self.modulus = int(modulus_hexstr, 16)
self.num_bits = round_to_pow2(int(math.ceil(math.log(self.modulus, 2))))
self.exponent = 65537
def encode(self):
"""Encodes the public RSA key in |AvbRSAPublicKeyHeader| format.
This creates a |AvbRSAPublicKeyHeader| as well as the two large
numbers (|key_num_bits| bits long) following it.
Returns:
The |AvbRSAPublicKeyHeader| followed by two large numbers as bytes.
Raises:
AvbError: If given RSA key exponent is not 65537.
"""
if self.exponent != 65537:
raise AvbError('Only RSA keys with exponent 65537 are supported.')
ret = bytearray()
# Calculate n0inv = -1/n[0] (mod 2^32)
b = 2 ** 32
n0inv = b - modinv(self.modulus, b)
# Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
r = 2 ** self.modulus.bit_length()
rrmodn = r * r % self.modulus
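# Sanity relations for the Montgomery parameters above (added for clarity,
# not part of the original tool): because the modulus is odd,
# (self.modulus * n0inv + 1) % b == 0, and
# rrmodn == pow(2, 2 * self.modulus.bit_length(), self.modulus).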
ret.extend(struct.pack('!II', self.num_bits, n0inv))
ret.extend(encode_long(self.num_bits, self.modulus))
ret.extend(encode_long(self.num_bits, rrmodn))
return bytes(ret)
def sign(self, algorithm_name, data_to_sign, signing_helper=None,
signing_helper_with_files=None):
"""Sign given data using |signing_helper| or openssl.
openssl is used if neither the parameters signing_helper nor
signing_helper_with_files are given.
Arguments:
algorithm_name: The algorithm name as per the ALGORITHMS dict.
data_to_sign: Data to sign as bytes or bytearray.
signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
Returns:
The signature as bytes.
Raises:
AvbError: If an error occurred during signing.
"""
# Checks requested algorithm for validity.
algorithm = ALGORITHMS.get(algorithm_name)
if not algorithm:
raise AvbError('Algorithm with name {} is not supported.'
.format(algorithm_name))
if self.num_bits != (algorithm.signature_num_bytes * 8):
raise AvbError('Key size of key ({} bits) does not match key size '
'({} bits) of given algorithm {}.'
.format(self.num_bits, algorithm.signature_num_bytes * 8,
algorithm_name))
# Hashes the data.
hasher = hashlib.new(algorithm.hash_name)
hasher.update(data_to_sign)
digest = hasher.digest()
# Calculates the signature.
padding_and_hash = algorithm.padding + digest
p = None
if signing_helper_with_files is not None:
with tempfile.NamedTemporaryFile() as signing_file:
signing_file.write(padding_and_hash)
signing_file.flush()
p = subprocess.Popen([signing_helper_with_files, algorithm_name,
self.key_path, signing_file.name])
retcode = p.wait()
if retcode != 0:
raise AvbError('Error signing')
signing_file.seek(0)
signature = signing_file.read()
else:
if signing_helper is not None:
p = subprocess.Popen(
[signing_helper, algorithm_name, self.key_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
p = subprocess.Popen(
['openssl', 'rsautl', '-sign', '-inkey', self.key_path, '-raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate(padding_and_hash)
retcode = p.wait()
if retcode != 0:
raise AvbError('Error signing: {}'.format(perr))
signature = pout
if len(signature) != algorithm.signature_num_bytes:
raise AvbError('Error signing: Invalid length of signature')
return signature
def lookup_algorithm_by_type(alg_type):
"""Looks up algorithm by type.
Arguments:
alg_type: The integer representing the type.
Returns:
A tuple with the algorithm name and an |Algorithm| instance.
Raises:
AvbError: If the algorithm cannot be found.
"""
for alg_name in ALGORITHMS:
alg_data = ALGORITHMS[alg_name]
if alg_data.algorithm_type == alg_type:
return (alg_name, alg_data)
raise AvbError('Unknown algorithm type {}'.format(alg_type))
def lookup_hash_size_by_type(alg_type):
"""Looks up hash size by type.
Arguments:
alg_type: The integer representing the type.
Returns:
The corresponding hash size.
Raises:
AvbError: If the algorithm cannot be found.
"""
for alg_name in ALGORITHMS:
alg_data = ALGORITHMS[alg_name]
if alg_data.algorithm_type == alg_type:
return alg_data.hash_num_bytes
raise AvbError('Unsupported algorithm type {}'.format(alg_type))
def verify_vbmeta_signature(vbmeta_header, vbmeta_blob):
"""Checks that signature in a vbmeta blob was made by the embedded public key.
Arguments:
vbmeta_header: A AvbVBMetaHeader.
vbmeta_blob: The whole vbmeta blob, including the header as bytes or
bytearray.
Returns:
True if the signature is valid and corresponds to the embedded
public key. Also returns True if the vbmeta blob is not signed.
Raises:
AvbError: If there errors calling out to openssl command during
signature verification.
"""
(_, alg) = lookup_algorithm_by_type(vbmeta_header.algorithm_type)
if not alg.hash_name:
return True
header_blob = vbmeta_blob[0:256]
auth_offset = 256
aux_offset = auth_offset + vbmeta_header.authentication_data_block_size
aux_size = vbmeta_header.auxiliary_data_block_size
aux_blob = vbmeta_blob[aux_offset:aux_offset + aux_size]
pubkey_offset = aux_offset + vbmeta_header.public_key_offset
pubkey_size = vbmeta_header.public_key_size
pubkey_blob = vbmeta_blob[pubkey_offset:pubkey_offset + pubkey_size]
digest_offset = auth_offset + vbmeta_header.hash_offset
digest_size = vbmeta_header.hash_size
digest_blob = vbmeta_blob[digest_offset:digest_offset + digest_size]
sig_offset = auth_offset + vbmeta_header.signature_offset
sig_size = vbmeta_header.signature_size
sig_blob = vbmeta_blob[sig_offset:sig_offset + sig_size]
# Now that we've got the stored digest, public key, and signature
# all we need to do is verify. These are exactly the same
# steps as performed in the avb_vbmeta_image_verify() function in
# libavb/avb_vbmeta_image.c.
ha = hashlib.new(alg.hash_name)
ha.update(header_blob)
ha.update(aux_blob)
computed_digest = ha.digest()
if computed_digest != digest_blob:
return False
padding_and_digest = alg.padding + computed_digest
(num_bits,) = struct.unpack('!I', pubkey_blob[0:4])
modulus_blob = pubkey_blob[8:8 + num_bits//8]
modulus = decode_long(modulus_blob)
exponent = 65537
# We used to have this:
#
# import Crypto.PublicKey.RSA
# key = Crypto.PublicKey.RSA.construct((modulus, long(exponent)))
# if not key.verify(decode_long(padding_and_digest),
# (decode_long(sig_blob), None)):
# return False
# return True
#
# but since 'avbtool verify_image' is used on the builders we don't want
# to rely on Crypto.PublicKey.RSA. Instead just use openssl(1) to verify.
asn1_str = ('asn1=SEQUENCE:pubkeyinfo\n'
'\n'
'[pubkeyinfo]\n'
'algorithm=SEQUENCE:rsa_alg\n'
'pubkey=BITWRAP,SEQUENCE:rsapubkey\n'
'\n'
'[rsa_alg]\n'
'algorithm=OID:rsaEncryption\n'
'parameter=NULL\n'
'\n'
'[rsapubkey]\n'
'n=INTEGER:{}\n'
'e=INTEGER:{}\n').format(hex(modulus).rstrip('L'),
hex(exponent).rstrip('L'))
with tempfile.NamedTemporaryFile() as asn1_tmpfile:
asn1_tmpfile.write(asn1_str.encode('ascii'))
asn1_tmpfile.flush()
with tempfile.NamedTemporaryFile() as der_tmpfile:
p = subprocess.Popen(
['openssl', 'asn1parse', '-genconf', asn1_tmpfile.name, '-out',
der_tmpfile.name, '-noout'])
retcode = p.wait()
if retcode != 0:
raise AvbError('Error generating DER file')
p = subprocess.Popen(
['openssl', 'rsautl', '-verify', '-pubin', '-inkey', der_tmpfile.name,
'-keyform', 'DER', '-raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate(sig_blob)
retcode = p.wait()
if retcode != 0:
raise AvbError('Error verifying data: {}'.format(perr))
if pout != padding_and_digest:
sys.stderr.write('Signature not correct\n')
return False
return True
class ImageChunk(object):
"""Data structure used for representing chunks in Android sparse files.
Attributes:
chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
chunk_offset: Offset in the sparse file where this chunk begins.
output_offset: Offset in de-sparsified file where output begins.
output_size: Number of bytes in output.
input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
fill_data: Blob with data to fill if TYPE_FILL otherwise None.
"""
FORMAT = '<2H2I'
TYPE_RAW = 0xcac1
TYPE_FILL = 0xcac2
TYPE_DONT_CARE = 0xcac3
TYPE_CRC32 = 0xcac4
def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
input_offset, fill_data):
"""Initializes an ImageChunk object.
Arguments:
chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
chunk_offset: Offset in the sparse file where this chunk begins.
output_offset: Offset in de-sparsified file.
output_size: Number of bytes in output.
input_offset: Offset in sparse file if TYPE_RAW otherwise None.
fill_data: Blob as bytes with data to fill if TYPE_FILL otherwise None.
Raises:
ValueError: If given chunk parameters are invalid.
"""
self.chunk_type = chunk_type
self.chunk_offset = chunk_offset
self.output_offset = output_offset
self.output_size = output_size
self.input_offset = input_offset
self.fill_data = fill_data
# Check invariants.
if self.chunk_type == self.TYPE_RAW:
if self.fill_data is not None:
raise ValueError('RAW chunk cannot have fill_data set.')
if not self.input_offset:
raise ValueError('RAW chunk must have input_offset set.')
elif self.chunk_type == self.TYPE_FILL:
if self.fill_data is None:
raise ValueError('FILL chunk must have fill_data set.')
if self.input_offset:
raise ValueError('FILL chunk cannot have input_offset set.')
elif self.chunk_type == self.TYPE_DONT_CARE:
if self.fill_data is not None:
raise ValueError('DONT_CARE chunk cannot have fill_data set.')
if self.input_offset:
raise ValueError('DONT_CARE chunk cannot have input_offset set.')
else:
raise ValueError('Invalid chunk type')
class ImageHandler(object):
"""Abstraction for image I/O with support for Android sparse images.
This class provides an interface for working with image files that
may be using the Android Sparse Image format. When an instance is
constructed, we test whether it's an Android sparse file. If so,
operations will be on the sparse file by interpreting the sparse
format, otherwise they will be directly on the file. Either way the
operations do the same.
For reading, this interface mimics a file object - it has seek(),
tell(), and read() methods. For writing, only truncation
(truncate()) and appending is supported (append_raw() and
append_dont_care()). Additionally, data can only be written in units
of the block size.
Attributes:
filename: Name of file.
is_sparse: Whether the file being operated on is sparse.
block_size: The block size, typically 4096.
image_size: The size of the unsparsified file.
"""
# See system/core/libsparse/sparse_format.h for details.
MAGIC = 0xed26ff3a
HEADER_FORMAT = '<I4H4I'
# These are formats and offset of just the |total_chunks| and
# |total_blocks| fields.
NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
NUM_CHUNKS_AND_BLOCKS_OFFSET = 16
def __init__(self, image_filename, read_only=False):
"""Initializes an image handler.
Arguments:
image_filename: The name of the file to operate on.
read_only: True if file is only opened for read-only operations.
Raises:
ValueError: If data in the file is invalid.
"""
self.filename = image_filename
self._num_total_blocks = 0
self._num_total_chunks = 0
self._file_pos = 0
self._read_only = read_only
self._read_header()
def _read_header(self):
"""Initializes internal data structures used for reading file.
This may be called multiple times and is typically called after
modifying the file (e.g. appending, truncation).
Raises:
ValueError: If data in the file is invalid.
"""
self.is_sparse = False
self.block_size = 4096
self._file_pos = 0
if self._read_only:
self._image = open(self.filename, 'rb')
else:
self._image = open(self.filename, 'r+b')
self._image.seek(0, os.SEEK_END)
self.image_size = self._image.tell()
self._image.seek(0, os.SEEK_SET)
header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
(magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
block_size, self._num_total_blocks, self._num_total_chunks,
_) = struct.unpack(self.HEADER_FORMAT, header_bin)
if magic != self.MAGIC:
# Not a sparse image, our job here is done.
return
if not (major_version == 1 and minor_version == 0):
raise ValueError('Encountered sparse image format version {}.{} but '
'only 1.0 is supported'.format(major_version,
minor_version))
if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
raise ValueError('Unexpected file_hdr_sz value {}.'.
format(file_hdr_sz))
if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
raise ValueError('Unexpected chunk_hdr_sz value {}.'.
format(chunk_hdr_sz))
self.block_size = block_size
# Build a list of chunks by parsing the file.
self._chunks = []
# Find the smallest offset where only "Don't care" chunks
# follow. This will be the size of the content in the sparse
# image.
offset = 0
output_offset = 0
for _ in range(1, self._num_total_chunks + 1):
chunk_offset = self._image.tell()
header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
(chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
header_bin)
data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)
if chunk_type == ImageChunk.TYPE_RAW:
if data_sz != (chunk_sz * self.block_size):
raise ValueError('Raw chunk input size ({}) does not match output '
'size ({})'.
format(data_sz, chunk_sz*self.block_size))
self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
chunk_offset,
output_offset,
chunk_sz*self.block_size,
self._image.tell(),
None))
self._image.seek(data_sz, os.SEEK_CUR)
elif chunk_type == ImageChunk.TYPE_FILL:
if data_sz != 4:
raise ValueError('Fill chunk should have 4 bytes of fill, but this '
'has {}'.format(data_sz))
fill_data = self._image.read(4)
self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
chunk_offset,
output_offset,
chunk_sz*self.block_size,
None,
fill_data))
elif chunk_type == ImageChunk.TYPE_DONT_CARE:
if data_sz != 0:
raise ValueError('Don\'t care chunk input size is non-zero ({})'.
format(data_sz))
self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
chunk_offset,
output_offset,
chunk_sz*self.block_size,
None,
None))
elif chunk_type == ImageChunk.TYPE_CRC32:
if data_sz != 4:
raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
'this has {}'.format(data_sz))
self._image.read(4)
else:
raise ValueError('Unknown chunk type {}'.format(chunk_type))
offset += chunk_sz
output_offset += chunk_sz*self.block_size
# Record where sparse data end.
self._sparse_end = self._image.tell()
# Now that we've traversed all chunks, sanity check.
if self._num_total_blocks != offset:
raise ValueError('The header said we should have {} output blocks, '
'but we saw {}'.format(self._num_total_blocks, offset))
junk_len = len(self._image.read())
if junk_len > 0:
raise ValueError('There were {} bytes of extra data at the end of the '
'file.'.format(junk_len))
# Assign |image_size|.
self.image_size = output_offset
# This is used when bisecting in read() to find the initial slice.
self._chunk_output_offsets = [i.output_offset for i in self._chunks]
self.is_sparse = True
def _update_chunks_and_blocks(self):
"""Helper function to update the image header.
The |total_chunks| and |total_blocks| fields in the header
will be set to the values of the |_num_total_blocks| and
|_num_total_chunks| attributes.
"""
self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
self._num_total_blocks,
self._num_total_chunks))
def append_dont_care(self, num_bytes):
"""Appends a DONT_CARE chunk to the sparse file.
The given number of bytes must be a multiple of the block size.
Arguments:
num_bytes: Size in number of bytes of the DONT_CARE chunk.
Raises:
OSError: If ImageHandler was initialized in read-only mode.
"""
assert num_bytes % self.block_size == 0
if self._read_only:
raise OSError('ImageHandler is in read-only mode.')
if not self.is_sparse:
self._image.seek(0, os.SEEK_END)
# This is more efficient than writing NUL bytes since it'll add
# a hole on file systems that support sparse files (native
# sparse, not Android sparse).
self._image.truncate(self._image.tell() + num_bytes)
self._read_header()
return
self._num_total_chunks += 1
self._num_total_blocks += num_bytes // self.block_size
self._update_chunks_and_blocks()
self._image.seek(self._sparse_end, os.SEEK_SET)
self._image.write(struct.pack(ImageChunk.FORMAT,
ImageChunk.TYPE_DONT_CARE,
0, # Reserved
num_bytes // self.block_size,
struct.calcsize(ImageChunk.FORMAT)))
self._read_header()
def append_raw(self, data):
"""Appends a RAW chunk to the sparse file.
The length of the given data must be a multiple of the block size.
Arguments:
data: Data to append as bytes.
Raises:
OSError: If ImageHandler was initialized in read-only mode.
"""
assert len(data) % self.block_size == 0
if self._read_only:
raise OSError('ImageHandler is in read-only mode.')
if not self.is_sparse:
self._image.seek(0, os.SEEK_END)
self._image.write(data)
self._read_header()
return
self._num_total_chunks += 1
self._num_total_blocks += len(data) // self.block_size
self._update_chunks_and_blocks()
self._image.seek(self._sparse_end, os.SEEK_SET)
self._image.write(struct.pack(ImageChunk.FORMAT,
ImageChunk.TYPE_RAW,
0, # Reserved
len(data) // self.block_size,
len(data) +
struct.calcsize(ImageChunk.FORMAT)))
self._image.write(data)
self._read_header()
def append_fill(self, fill_data, size):
"""Appends a fill chunk to the sparse file.
The total length of the fill data must be a multiple of the block size.
Arguments:
fill_data: Fill data to append - must be four bytes.
size: Size of the chunk in bytes - must be a multiple of four and the block size.
Raises:
OSError: If ImageHandler was initialized in read-only mode.
"""
assert len(fill_data) == 4
assert size % 4 == 0
assert size % self.block_size == 0
if self._read_only:
raise OSError('ImageHandler is in read-only mode.')
if not self.is_sparse:
self._image.seek(0, os.SEEK_END)
self._image.write(fill_data * (size//4))
self._read_header()
return
self._num_total_chunks += 1
self._num_total_blocks += size // self.block_size
self._update_chunks_and_blocks()
self._image.seek(self._sparse_end, os.SEEK_SET)
self._image.write(struct.pack(ImageChunk.FORMAT,
ImageChunk.TYPE_FILL,
0, # Reserved
size // self.block_size,
4 + struct.calcsize(ImageChunk.FORMAT)))
self._image.write(fill_data)
self._read_header()
def seek(self, offset):
"""Sets the cursor position for reading from unsparsified file.
Arguments:
offset: Offset to seek to from the beginning of the file.
Raises:
RuntimeError: If the given offset is negative.
"""
if offset < 0:
raise RuntimeError('Seeking with negative offset: {}'.format(offset))
self._file_pos = offset
def read(self, size):
"""Reads data from the unsparsified file.
This method may return fewer than |size| bytes of data if the end
of the file was encountered.
The file cursor for reading is advanced by the number of bytes
read.
Arguments:
size: Number of bytes to read.
Returns:
The data as bytes.
"""
if not self.is_sparse:
self._image.seek(self._file_pos)
data = self._image.read(size)
self._file_pos += len(data)
return data
# Iterate over all chunks.
chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
self._file_pos) - 1
data = bytearray()
to_go = size
while to_go > 0:
chunk = self._chunks[chunk_idx]
chunk_pos_offset = self._file_pos - chunk.output_offset
chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)
if chunk.chunk_type == ImageChunk.TYPE_RAW:
self._image.seek(chunk.input_offset + chunk_pos_offset)
data.extend(self._image.read(chunk_pos_to_go))
elif chunk.chunk_type == ImageChunk.TYPE_FILL:
all_data = chunk.fill_data*(chunk_pos_to_go // len(chunk.fill_data) + 2)
offset_mod = chunk_pos_offset % len(chunk.fill_data)
data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
else:
assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
data.extend(b'\0' * chunk_pos_to_go)
to_go -= chunk_pos_to_go
self._file_pos += chunk_pos_to_go
chunk_idx += 1
# Generate partial read in case of EOF.
if chunk_idx >= len(self._chunks):
break
return bytes(data)
def tell(self):
"""Returns the file cursor position for reading from unsparsified file.
Returns:
The file cursor position for reading.
"""
return self._file_pos
def truncate(self, size):
"""Truncates the unsparsified file.
Arguments:
size: Desired size of unsparsified file.
Raises:
ValueError: If desired size isn't a multiple of the block size.
OSError: If ImageHandler was initialized in read-only mode.
"""
if self._read_only:
raise OSError('ImageHandler is in read-only mode.')
if not self.is_sparse:
self._image.truncate(size)
self._read_header()
return
if size % self.block_size != 0:
raise ValueError('Cannot truncate to a size which is not a multiple '
'of the block size')
if size == self.image_size:
# Trivial case where there's nothing to do.
return
if size < self.image_size:
chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
chunk = self._chunks[chunk_idx]
if chunk.output_offset != size:
# Truncation in the middle of a chunk - need to keep the chunk
# and modify it.
chunk_idx_for_update = chunk_idx + 1
num_to_keep = size - chunk.output_offset
assert num_to_keep % self.block_size == 0
if chunk.chunk_type == ImageChunk.TYPE_RAW:
truncate_at = (chunk.chunk_offset +
struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
data_sz = num_to_keep
elif chunk.chunk_type == ImageChunk.TYPE_FILL:
truncate_at = (chunk.chunk_offset +
struct.calcsize(ImageChunk.FORMAT) + 4)
data_sz = 4
else:
assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
data_sz = 0
chunk_sz = num_to_keep // self.block_size
total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
self._image.seek(chunk.chunk_offset)
self._image.write(struct.pack(ImageChunk.FORMAT,
chunk.chunk_type,
0, # Reserved
chunk_sz,
total_sz))
chunk.output_size = num_to_keep
else:
# Truncation at a chunk boundary.
truncate_at = chunk.chunk_offset
chunk_idx_for_update = chunk_idx
self._num_total_chunks = chunk_idx_for_update
self._num_total_blocks = 0
for i in range(0, chunk_idx_for_update):
self._num_total_blocks += self._chunks[i].output_size // self.block_size
self._update_chunks_and_blocks()
self._image.truncate(truncate_at)
# We've modified the file so re-read all data.
self._read_header()
else:
# Truncating to grow - just add a DONT_CARE section.
self.append_dont_care(size - self.image_size)
class AvbDescriptor(object):
"""Class for AVB descriptor.
See the |AvbDescriptor| C struct for more information.
Attributes:
tag: The tag identifying what kind of descriptor this is.
data: The data in the descriptor.
"""
SIZE = 16
FORMAT_STRING = ('!QQ') # tag, num_bytes_following (descriptor header)
def __init__(self, data):
"""Initializes a new property descriptor.
Arguments:
data: If not None, must be a bytearray().
Raises:
LookupError: If the given descriptor is malformed.
"""
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(self.tag, num_bytes_following) = (
struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
self.data = data[self.SIZE:self.SIZE + num_bytes_following]
else:
self.tag = None
self.data = None
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
o.write(' Unknown descriptor:\n')
o.write(' Tag: {}\n'.format(self.tag))
if len(self.data) < 256:
o.write(' Data: {} ({} bytes)\n'.format(
repr(str(self.data)), len(self.data)))
else:
o.write(' Data: {} bytes\n'.format(len(self.data)))
def encode(self):
"""Serializes the descriptor.
Returns:
A bytearray() with the descriptor data.
"""
num_bytes_following = len(self.data)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding)
padding = struct.pack(str(padding_size) + 'x')
ret = desc + self.data + padding
return bytearray(ret)
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
# Deletes unused parameters to prevent pylint warning unused-argument.
del image_dir, image_ext, expected_chain_partitions_map
del image_containing_descriptor, accept_zeroed_hashtree
# Nothing to do.
return True
class AvbPropertyDescriptor(AvbDescriptor):
"""A class for property descriptors.
See the |AvbPropertyDescriptor| C struct for more information.
Attributes:
key: The key as string.
value: The value as bytes.
"""
TAG = 0
SIZE = 32
FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header)
'Q' # key size (bytes)
'Q') # value size (bytes)
def __init__(self, data=None):
"""Initializes a new property descriptor.
Arguments:
data: If not None, must be as bytes of size |SIZE|.
Raises:
LookupError: If the given descriptor is malformed.
"""
super(AvbPropertyDescriptor, self).__init__(None)
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(tag, num_bytes_following, key_size,
value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
expected_size = round_to_multiple(
self.SIZE - 16 + key_size + 1 + value_size + 1, 8)
if tag != self.TAG or num_bytes_following != expected_size:
raise LookupError('Given data does not look like a property '
'descriptor.')
try:
self.key = data[self.SIZE:(self.SIZE + key_size)].decode('utf-8')
except UnicodeDecodeError as e:
raise LookupError('Key cannot be decoded as UTF-8: {}.'.format(e))
self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 +
value_size)]
else:
self.key = ''
self.value = b''
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
# Go forward with python 3, bytes are represented with the 'b' prefix,
# e.g. b'foobar'. Thus, we trim off the 'b' to keep the print output
# the same between python 2 and python 3.
printable_value = repr(self.value)
if printable_value.startswith('b\''):
printable_value = printable_value[1:]
if len(self.value) < 256:
o.write(' Prop: {} -> {}\n'.format(self.key, printable_value))
else:
o.write(' Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value)))
def encode(self):
"""Serializes the descriptor.
Returns:
The descriptor data as bytes.
"""
key_encoded = self.key.encode('utf-8')
num_bytes_following = (
self.SIZE + len(key_encoded) + len(self.value) + 2 - 16)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
len(key_encoded), len(self.value))
ret = (desc + key_encoded + b'\0' + self.value + b'\0' +
padding_size * b'\0')
return ret
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
# Nothing to do.
return True
class AvbHashtreeDescriptor(AvbDescriptor):
"""A class for hashtree descriptors.
See the |AvbHashtreeDescriptor| C struct for more information.
Attributes:
dm_verity_version: dm-verity version used.
image_size: Size of the image, after rounding up to |block_size|.
tree_offset: Offset of the hash tree in the file.
tree_size: Size of the tree.
data_block_size: Data block size.
hash_block_size: Hash block size.
fec_num_roots: Number of roots used for FEC (0 if FEC is not used).
fec_offset: Offset of FEC data (0 if FEC is not used).
fec_size: Size of FEC data (0 if FEC is not used).
hash_algorithm: Hash algorithm used as string.
partition_name: Partition name as string.
salt: Salt used as bytes.
root_digest: Root digest as bytes.
flags: Descriptor flags (see avb_hashtree_descriptor.h).
"""
TAG = 1
RESERVED = 60
SIZE = 120 + RESERVED
FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header)
'L' # dm-verity version used
'Q' # image size (bytes)
'Q' # tree offset (bytes)
'Q' # tree size (bytes)
'L' # data block size (bytes)
'L' # hash block size (bytes)
'L' # FEC number of roots
'Q' # FEC offset (bytes)
'Q' # FEC size (bytes)
'32s' # hash algorithm used
'L' # partition name (bytes)
'L' # salt length (bytes)
'L' # root digest length (bytes)
'L' + # flags
str(RESERVED) + 's') # reserved
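  # Serialized layout (see encode() below): the fixed-size header above is
  # followed by the UTF-8 partition name, the salt and the root digest,
  # zero-padded so that everything after the initial 16-byte tag/length pair
  # is a multiple of 8 bytes.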
def __init__(self, data=None):
"""Initializes a new hashtree descriptor.
Arguments:
data: If not None, must be bytes of size |SIZE|.
Raises:
LookupError: If the given descriptor is malformed.
"""
super(AvbHashtreeDescriptor, self).__init__(None)
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(tag, num_bytes_following, self.dm_verity_version, self.image_size,
self.tree_offset, self.tree_size, self.data_block_size,
self.hash_block_size, self.fec_num_roots, self.fec_offset, self.fec_size,
self.hash_algorithm, partition_name_len, salt_len,
root_digest_len, self.flags, _) = struct.unpack(self.FORMAT_STRING,
data[0:self.SIZE])
expected_size = round_to_multiple(
self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8)
if tag != self.TAG or num_bytes_following != expected_size:
raise LookupError('Given data does not look like a hashtree '
'descriptor.')
# Nuke NUL-bytes at the end.
self.hash_algorithm = self.hash_algorithm.rstrip(b'\0').decode('ascii')
o = 0
try:
self.partition_name = data[
(self.SIZE + o):(self.SIZE + o + partition_name_len)
].decode('utf-8')
except UnicodeDecodeError as e:
raise LookupError('Partition name cannot be decoded as UTF-8: {}.'
.format(e))
o += partition_name_len
self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
o += salt_len
self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
if root_digest_len != len(hashlib.new(self.hash_algorithm).digest()):
if root_digest_len != 0:
raise LookupError('root_digest_len doesn\'t match hash algorithm')
else:
self.dm_verity_version = 0
self.image_size = 0
self.tree_offset = 0
self.tree_size = 0
self.data_block_size = 0
self.hash_block_size = 0
self.fec_num_roots = 0
self.fec_offset = 0
self.fec_size = 0
self.hash_algorithm = ''
self.partition_name = ''
self.salt = b''
self.root_digest = b''
self.flags = 0
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
o.write(' Hashtree descriptor:\n')
o.write(' Version of dm-verity: {}\n'.format(self.dm_verity_version))
o.write(' Image Size: {} bytes\n'.format(self.image_size))
o.write(' Tree Offset: {}\n'.format(self.tree_offset))
o.write(' Tree Size: {} bytes\n'.format(self.tree_size))
o.write(' Data Block Size: {} bytes\n'.format(
self.data_block_size))
o.write(' Hash Block Size: {} bytes\n'.format(
self.hash_block_size))
o.write(' FEC num roots: {}\n'.format(self.fec_num_roots))
o.write(' FEC offset: {}\n'.format(self.fec_offset))
o.write(' FEC size: {} bytes\n'.format(self.fec_size))
o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm))
o.write(' Partition Name: {}\n'.format(self.partition_name))
o.write(' Salt: {}\n'.format(self.salt.hex()))
o.write(' Root Digest: {}\n'.format(self.root_digest.hex()))
o.write(' Flags: {}\n'.format(self.flags))
def encode(self):
"""Serializes the descriptor.
Returns:
The descriptor data as bytes.
"""
hash_algorithm_encoded = self.hash_algorithm.encode('ascii')
partition_name_encoded = self.partition_name.encode('utf-8')
num_bytes_following = (self.SIZE + len(partition_name_encoded)
+ len(self.salt) + len(self.root_digest) - 16)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
self.dm_verity_version, self.image_size,
self.tree_offset, self.tree_size, self.data_block_size,
self.hash_block_size, self.fec_num_roots,
self.fec_offset, self.fec_size, hash_algorithm_encoded,
len(partition_name_encoded), len(self.salt),
len(self.root_digest), self.flags, self.RESERVED * b'\0')
ret = (desc + partition_name_encoded + self.salt + self.root_digest +
padding_size * b'\0')
return ret
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
if not self.partition_name:
image_filename = image_containing_descriptor.filename
image = image_containing_descriptor
else:
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename, read_only=True)
    # Generate the hashtree and check that it matches what's in the file.
digest_size = len(hashlib.new(self.hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
(hash_level_offsets, tree_size) = calc_hash_level_offsets(
self.image_size, self.data_block_size, digest_size + digest_padding)
root_digest, hash_tree = generate_hash_tree(image, self.image_size,
self.data_block_size,
self.hash_algorithm, self.salt,
digest_padding,
hash_level_offsets,
tree_size)
# The root digest must match unless it is not embedded in the descriptor.
if self.root_digest and root_digest != self.root_digest:
sys.stderr.write('hashtree of {} does not match descriptor\n'.
format(image_filename))
return False
# ... also check that the on-disk hashtree matches
image.seek(self.tree_offset)
hash_tree_ondisk = image.read(self.tree_size)
is_zeroed = (self.tree_size == 0) or (hash_tree_ondisk[0:8] == b'ZeRoHaSH')
if is_zeroed and accept_zeroed_hashtree:
print('{}: skipping verification since hashtree is zeroed and '
'--accept_zeroed_hashtree was given'
.format(self.partition_name))
else:
if hash_tree != hash_tree_ondisk:
sys.stderr.write('hashtree of {} contains invalid data\n'.
format(image_filename))
return False
print('{}: Successfully verified {} hashtree of {} for image of {} bytes'
.format(self.partition_name, self.hash_algorithm, image.filename,
self.image_size))
    # TODO(zeuthen): we could also verify that the FEC stored in the image is
    # correct, but this a) currently requires the 'fec' binary, b) takes a
    # long time, and c) is not strictly needed for verification purposes
    # since we have already verified the root hash.
return True
class AvbHashDescriptor(AvbDescriptor):
"""A class for hash descriptors.
See the |AvbHashDescriptor| C struct for more information.
Attributes:
image_size: Image size, in bytes.
hash_algorithm: Hash algorithm used as string.
partition_name: Partition name as string.
salt: Salt used as bytes.
digest: The hash value of salt and data combined as bytes.
flags: The descriptor flags (see avb_hash_descriptor.h).
"""
TAG = 2
RESERVED = 60
SIZE = 72 + RESERVED
FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header)
'Q' # image size (bytes)
'32s' # hash algorithm used
'L' # partition name (bytes)
'L' # salt length (bytes)
'L' # digest length (bytes)
'L' + # flags
str(RESERVED) + 's') # reserved
def __init__(self, data=None):
"""Initializes a new hash descriptor.
Arguments:
data: If not None, must be bytes of size |SIZE|.
Raises:
LookupError: If the given descriptor is malformed.
"""
super(AvbHashDescriptor, self).__init__(None)
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(tag, num_bytes_following, self.image_size, self.hash_algorithm,
partition_name_len, salt_len,
digest_len, self.flags, _) = struct.unpack(self.FORMAT_STRING,
data[0:self.SIZE])
expected_size = round_to_multiple(
self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8)
if tag != self.TAG or num_bytes_following != expected_size:
raise LookupError('Given data does not look like a hash descriptor.')
# Nuke NUL-bytes at the end.
self.hash_algorithm = self.hash_algorithm.rstrip(b'\0').decode('ascii')
o = 0
try:
self.partition_name = data[
(self.SIZE + o):(self.SIZE + o + partition_name_len)
].decode('utf-8')
except UnicodeDecodeError as e:
raise LookupError('Partition name cannot be decoded as UTF-8: {}.'
.format(e))
o += partition_name_len
self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
o += salt_len
self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)]
if digest_len != len(hashlib.new(self.hash_algorithm).digest()):
if digest_len != 0:
raise LookupError('digest_len doesn\'t match hash algorithm')
else:
self.image_size = 0
self.hash_algorithm = ''
self.partition_name = ''
self.salt = b''
self.digest = b''
self.flags = 0
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
o.write(' Hash descriptor:\n')
o.write(' Image Size: {} bytes\n'.format(self.image_size))
o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm))
o.write(' Partition Name: {}\n'.format(self.partition_name))
o.write(' Salt: {}\n'.format(self.salt.hex()))
o.write(' Digest: {}\n'.format(self.digest.hex()))
o.write(' Flags: {}\n'.format(self.flags))
def encode(self):
"""Serializes the descriptor.
Returns:
The descriptor data as bytes.
"""
hash_algorithm_encoded = self.hash_algorithm.encode('ascii')
partition_name_encoded = self.partition_name.encode('utf-8')
num_bytes_following = (self.SIZE + len(partition_name_encoded) +
len(self.salt) + len(self.digest) - 16)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
self.image_size, hash_algorithm_encoded,
len(partition_name_encoded), len(self.salt),
len(self.digest), self.flags, self.RESERVED * b'\0')
ret = (desc + partition_name_encoded + self.salt + self.digest +
padding_size * b'\0')
return ret
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
if not self.partition_name:
image_filename = image_containing_descriptor.filename
image = image_containing_descriptor
else:
image_filename = os.path.join(image_dir, self.partition_name + image_ext)
image = ImageHandler(image_filename, read_only=True)
data = image.read(self.image_size)
ha = hashlib.new(self.hash_algorithm)
ha.update(self.salt)
ha.update(data)
digest = ha.digest()
# The digest must match unless there is no digest in the descriptor.
if self.digest and digest != self.digest:
sys.stderr.write('{} digest of {} does not match digest in descriptor\n'.
format(self.hash_algorithm, image_filename))
return False
print('{}: Successfully verified {} hash of {} for image of {} bytes'
.format(self.partition_name, self.hash_algorithm, image.filename,
self.image_size))
return True
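  # The digest checked in verify() above is hash(salt || data); for example,
  # with SHA-256 this is equivalent to (sketch):
  #   hashlib.sha256(desc.salt + data).digest() == desc.digest
  # where |data| is the first |image_size| bytes of the partition image.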
class AvbKernelCmdlineDescriptor(AvbDescriptor):
"""A class for kernel command-line descriptors.
See the |AvbKernelCmdlineDescriptor| C struct for more information.
Attributes:
flags: Flags.
kernel_cmdline: The kernel command-line as string.
"""
TAG = 3
SIZE = 24
FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header)
'L' # flags
'L') # cmdline length (bytes)
FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED = (1 << 0)
FLAGS_USE_ONLY_IF_HASHTREE_DISABLED = (1 << 1)
def __init__(self, data=None):
"""Initializes a new kernel cmdline descriptor.
Arguments:
data: If not None, must be bytes of size |SIZE|.
Raises:
LookupError: If the given descriptor is malformed.
"""
super(AvbKernelCmdlineDescriptor, self).__init__(None)
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(tag, num_bytes_following, self.flags, kernel_cmdline_length) = (
struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length,
8)
if tag != self.TAG or num_bytes_following != expected_size:
raise LookupError('Given data does not look like a kernel cmdline '
'descriptor.')
# Nuke NUL-bytes at the end.
try:
self.kernel_cmdline = data[
self.SIZE:(self.SIZE + kernel_cmdline_length)].decode('utf-8')
except UnicodeDecodeError as e:
raise LookupError('Kernel command-line cannot be decoded as UTF-8: {}.'
.format(e))
else:
self.flags = 0
self.kernel_cmdline = ''
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
o.write(' Kernel Cmdline descriptor:\n')
o.write(' Flags: {}\n'.format(self.flags))
o.write(' Kernel Cmdline: \'{}\'\n'.format(self.kernel_cmdline))
def encode(self):
"""Serializes the descriptor.
Returns:
The descriptor data as bytes.
"""
kernel_cmd_encoded = self.kernel_cmdline.encode('utf-8')
num_bytes_following = (self.SIZE + len(kernel_cmd_encoded) - 16)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
self.flags, len(kernel_cmd_encoded))
ret = desc + kernel_cmd_encoded + padding_size * b'\0'
return ret
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
# Nothing to verify.
return True
class AvbChainPartitionDescriptor(AvbDescriptor):
"""A class for chained partition descriptors.
See the |AvbChainPartitionDescriptor| C struct for more information.
Attributes:
rollback_index_location: The rollback index location to use.
partition_name: Partition name as string.
public_key: The public key as bytes.
"""
TAG = 4
RESERVED = 64
SIZE = 28 + RESERVED
FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header)
'L' # rollback_index_location
'L' # partition_name_size (bytes)
'L' + # public_key_size (bytes)
str(RESERVED) + 's') # reserved
def __init__(self, data=None):
"""Initializes a new chain partition descriptor.
Arguments:
      data: If not None, must be bytes of size |SIZE|.
Raises:
LookupError: If the given descriptor is malformed.
"""
    super(AvbChainPartitionDescriptor, self).__init__(None)
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(tag, num_bytes_following, self.rollback_index_location,
partition_name_len,
public_key_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
expected_size = round_to_multiple(
self.SIZE - 16 + partition_name_len + public_key_len, 8)
if tag != self.TAG or num_bytes_following != expected_size:
raise LookupError('Given data does not look like a chain partition '
'descriptor.')
o = 0
try:
self.partition_name = data[
(self.SIZE + o):(self.SIZE + o + partition_name_len)
].decode('utf-8')
except UnicodeDecodeError as e:
raise LookupError('Partition name cannot be decoded as UTF-8: {}.'
.format(e))
o += partition_name_len
self.public_key = data[(self.SIZE + o):(self.SIZE + o + public_key_len)]
else:
self.rollback_index_location = 0
self.partition_name = ''
self.public_key = b''
def print_desc(self, o):
"""Print the descriptor.
Arguments:
o: The object to write the output to.
"""
o.write(' Chain Partition descriptor:\n')
o.write(' Partition Name: {}\n'.format(self.partition_name))
o.write(' Rollback Index Location: {}\n'.format(
self.rollback_index_location))
# Just show the SHA1 of the key, for size reasons.
pubkey_digest = hashlib.sha1(self.public_key).hexdigest()
o.write(' Public key (sha1): {}\n'.format(pubkey_digest))
def encode(self):
"""Serializes the descriptor.
Returns:
The descriptor data as bytes.
"""
partition_name_encoded = self.partition_name.encode('utf-8')
num_bytes_following = (
self.SIZE + len(partition_name_encoded) + len(self.public_key) - 16)
nbf_with_padding = round_to_multiple(num_bytes_following, 8)
padding_size = nbf_with_padding - num_bytes_following
desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
self.rollback_index_location,
len(partition_name_encoded), len(self.public_key),
self.RESERVED * b'\0')
ret = desc + partition_name_encoded + self.public_key + padding_size * b'\0'
return ret
def verify(self, image_dir, image_ext, expected_chain_partitions_map,
image_containing_descriptor, accept_zeroed_hashtree):
"""Verifies contents of the descriptor - used in verify_image sub-command.
Arguments:
image_dir: The directory of the file being verified.
image_ext: The extension of the file being verified (e.g. '.img').
expected_chain_partitions_map: A map from partition name to the
tuple (rollback_index_location, key_blob).
image_containing_descriptor: The image the descriptor is in.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Returns:
True if the descriptor verifies, False otherwise.
"""
value = expected_chain_partitions_map.get(self.partition_name)
if not value:
sys.stderr.write('No expected chain partition for partition {}. Use '
'--expected_chain_partition to specify expected '
'contents or --follow_chain_partitions.\n'.
format(self.partition_name))
return False
rollback_index_location, pk_blob = value
if self.rollback_index_location != rollback_index_location:
sys.stderr.write('Expected rollback_index_location {} does not '
'match {} in descriptor for partition {}\n'.
format(rollback_index_location,
self.rollback_index_location,
self.partition_name))
return False
if self.public_key != pk_blob:
sys.stderr.write('Expected public key blob does not match public '
'key blob in descriptor for partition {}\n'.
format(self.partition_name))
return False
print('{}: Successfully verified chain partition descriptor matches '
'expected data'.format(self.partition_name))
return True
DESCRIPTOR_CLASSES = [
AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor,
AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor
]
def parse_descriptors(data):
"""Parses a blob of data into descriptors.
Arguments:
data: Encoded descriptors as bytes.
Returns:
A list of instances of objects derived from AvbDescriptor. For
unknown descriptors, the class AvbDescriptor is used.
"""
o = 0
ret = []
while o < len(data):
tag, nb_following = struct.unpack('!2Q', data[o:o + 16])
if tag < len(DESCRIPTOR_CLASSES):
clazz = DESCRIPTOR_CLASSES[tag]
else:
clazz = AvbDescriptor
ret.append(clazz(data[o:o + 16 + nb_following]))
o += 16 + nb_following
return ret
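# Minimal usage sketch for parse_descriptors(), assuming |blob| holds a raw
# vbmeta struct (header | authentication block | auxiliary block), e.g. as
# produced by 'avbtool make_vbmeta_image':
#   h = AvbVBMetaHeader(blob[0:AvbVBMetaHeader.SIZE])
#   start = (AvbVBMetaHeader.SIZE + h.authentication_data_block_size +
#            h.descriptors_offset)
#   for desc in parse_descriptors(blob[start:start + h.descriptors_size]):
#     desc.print_desc(sys.stdout)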
class AvbFooter(object):
"""A class for parsing and writing footers.
Footers are stored at the end of partitions and point to where the
AvbVBMeta blob is located. They also contain the original size of
the image before AVB information was added.
Attributes:
magic: Magic for identifying the footer, see |MAGIC|.
version_major: The major version of avbtool that wrote the footer.
version_minor: The minor version of avbtool that wrote the footer.
original_image_size: Original image size.
vbmeta_offset: Offset of where the AvbVBMeta blob is stored.
vbmeta_size: Size of the AvbVBMeta blob.
"""
MAGIC = b'AVBf'
SIZE = 64
RESERVED = 28
FOOTER_VERSION_MAJOR = AVB_FOOTER_VERSION_MAJOR
FOOTER_VERSION_MINOR = AVB_FOOTER_VERSION_MINOR
FORMAT_STRING = ('!4s2L' # magic, 2 x version.
'Q' # Original image size.
'Q' # Offset of VBMeta blob.
'Q' + # Size of VBMeta blob.
str(RESERVED) + 'x') # padding for reserved bytes
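  # The encoded footer occupies the last |SIZE| (64) bytes of the partition;
  # readers locate it by seeking to image_size - SIZE (see Avb._parse_image())
  # and then follow |vbmeta_offset| to the vbmeta blob.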
def __init__(self, data=None):
"""Initializes a new footer object.
Arguments:
      data: If not None, must be bytes of size |SIZE|.
Raises:
LookupError: If the given footer is malformed.
struct.error: If the given data has no footer.
"""
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(self.magic, self.version_major, self.version_minor,
self.original_image_size, self.vbmeta_offset,
self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data)
if self.magic != self.MAGIC:
        raise LookupError('Given data does not look like an AVB footer.')
else:
self.magic = self.MAGIC
self.version_major = self.FOOTER_VERSION_MAJOR
self.version_minor = self.FOOTER_VERSION_MINOR
self.original_image_size = 0
self.vbmeta_offset = 0
self.vbmeta_size = 0
def encode(self):
"""Serializes the footer.
Returns:
The footer as bytes.
"""
return struct.pack(self.FORMAT_STRING, self.magic, self.version_major,
self.version_minor, self.original_image_size,
self.vbmeta_offset, self.vbmeta_size)
class AvbVBMetaHeader(object):
"""A class for parsing and writing AVB vbmeta images.
The attributes correspond to the |AvbVBMetaImageHeader| struct defined in
avb_vbmeta_image.h.
Attributes:
magic: Four bytes equal to "AVB0" (AVB_MAGIC).
required_libavb_version_major: The major version of libavb required for this
header.
required_libavb_version_minor: The minor version of libavb required for this
header.
authentication_data_block_size: The size of the signature block.
auxiliary_data_block_size: The size of the auxiliary data block.
algorithm_type: The verification algorithm used, see |AvbAlgorithmType|
enum.
hash_offset: Offset into the "Authentication data" block of hash data.
hash_size: Length of the hash data.
signature_offset: Offset into the "Authentication data" block of signature
data.
signature_size: Length of the signature data.
public_key_offset: Offset into the "Auxiliary data" block of public key
data.
public_key_size: Length of the public key data.
public_key_metadata_offset: Offset into the "Auxiliary data" block of public
key metadata.
public_key_metadata_size: Length of the public key metadata. Must be set to
zero if there is no public key metadata.
descriptors_offset: Offset into the "Auxiliary data" block of descriptor
data.
descriptors_size: Length of descriptor data.
rollback_index: The rollback index which can be used to prevent rollback to
older versions.
flags: Flags from the AvbVBMetaImageFlags enumeration. This must be set to
zero if the vbmeta image is not a top-level image.
rollback_index_location: The location of the rollback index defined in this
header. Only valid for the main vbmeta. For chained partitions, the
rollback index location must be specified in the
AvbChainPartitionDescriptor and this value must be set to 0.
release_string: The release string from avbtool, e.g. "avbtool 1.0.0" or
"avbtool 1.0.0 xyz_board Git-234abde89". Is guaranteed to be NUL
terminated. Applications must not make assumptions about how this
string is formatted.
"""
MAGIC = b'AVB0'
SIZE = 256
# Keep in sync with |reserved| field of |AvbVBMetaImageHeader|.
RESERVED = 80
# Keep in sync with |AvbVBMetaImageHeader|.
FORMAT_STRING = ('!4s2L' # magic, 2 x version
'2Q' # 2 x block size
'L' # algorithm type
'2Q' # offset, size (hash)
'2Q' # offset, size (signature)
'2Q' # offset, size (public key)
'2Q' # offset, size (public key metadata)
'2Q' # offset, size (descriptors)
'Q' # rollback_index
'L' # flags
'L' # rollback_index_location
'47sx' + # NUL-terminated release string
str(RESERVED) + 'x') # padding for reserved bytes
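  # On-disk layout of a vbmeta image: this 256-byte header, then the
  # "Authentication data" block (hash and signature), then the "Auxiliary
  # data" block (descriptors, public key and public key metadata). The
  # *_offset fields above are relative to the start of their respective block.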
def __init__(self, data=None):
"""Initializes a new header object.
Arguments:
      data: If not None, must be bytes of size |SIZE|.
Raises:
Exception: If the given data is malformed.
"""
assert struct.calcsize(self.FORMAT_STRING) == self.SIZE
if data:
(self.magic, self.required_libavb_version_major,
self.required_libavb_version_minor,
self.authentication_data_block_size, self.auxiliary_data_block_size,
self.algorithm_type, self.hash_offset, self.hash_size,
self.signature_offset, self.signature_size, self.public_key_offset,
self.public_key_size, self.public_key_metadata_offset,
self.public_key_metadata_size, self.descriptors_offset,
self.descriptors_size,
self.rollback_index,
self.flags,
self.rollback_index_location,
release_string) = struct.unpack(self.FORMAT_STRING, data)
      if self.magic != self.MAGIC:
        raise AvbError('Given image does not look like a vbmeta image.')
      # Nuke NUL-bytes at the end of the release string.
      self.release_string = release_string.rstrip(b'\0').decode('utf-8')
else:
self.magic = self.MAGIC
# Start by just requiring version 1.0. Code that adds features
# in a future version can use bump_required_libavb_version_minor() to
# bump the minor.
self.required_libavb_version_major = AVB_VERSION_MAJOR
self.required_libavb_version_minor = 0
self.authentication_data_block_size = 0
self.auxiliary_data_block_size = 0
self.algorithm_type = 0
self.hash_offset = 0
self.hash_size = 0
self.signature_offset = 0
self.signature_size = 0
self.public_key_offset = 0
self.public_key_size = 0
self.public_key_metadata_offset = 0
self.public_key_metadata_size = 0
self.descriptors_offset = 0
self.descriptors_size = 0
self.rollback_index = 0
self.flags = 0
self.rollback_index_location = 0
self.release_string = get_release_string()
def bump_required_libavb_version_minor(self, minor):
"""Function to bump required_libavb_version_minor.
Call this when writing data that requires a specific libavb
version to parse it.
Arguments:
minor: The minor version of libavb that has support for the feature.
"""
self.required_libavb_version_minor = (
max(self.required_libavb_version_minor, minor))
def encode(self):
"""Serializes the header.
Returns:
The header as bytes.
"""
release_string_encoded = self.release_string.encode('utf-8')
return struct.pack(self.FORMAT_STRING, self.magic,
self.required_libavb_version_major,
self.required_libavb_version_minor,
self.authentication_data_block_size,
self.auxiliary_data_block_size, self.algorithm_type,
self.hash_offset, self.hash_size, self.signature_offset,
self.signature_size, self.public_key_offset,
self.public_key_size, self.public_key_metadata_offset,
self.public_key_metadata_size, self.descriptors_offset,
self.descriptors_size, self.rollback_index, self.flags,
self.rollback_index_location, release_string_encoded)
class Avb(object):
"""Business logic for avbtool command-line tool."""
# Keep in sync with avb_ab_flow.h.
AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x'
AB_MAGIC = b'\0AB0'
AB_MAJOR_VERSION = 1
AB_MINOR_VERSION = 0
AB_MISC_METADATA_OFFSET = 2048
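  # AB_FORMAT_NO_CRC layout: magic, major/minor version, two reserved bytes,
  # then (priority, tries_remaining, successful_boot, reserved byte) for slot
  # A and for slot B, followed by 12 reserved bytes. A big-endian CRC-32 of
  # this struct is appended when writing (see set_ab_metadata()).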
# Constants for maximum metadata size. These are used to give
# meaningful errors if the value passed in via --partition_size is
# too small and when --calc_max_image_size is used. We use
# conservative figures.
MAX_VBMETA_SIZE = 64 * 1024
MAX_FOOTER_SIZE = 4096
def generate_test_image(self, output, image_size, start_byte):
"""Generates a test image for testing avbtool with known content.
    The content has the following repeating pattern (starting at |start_byte|):
    0x00 0x01 0x02 .. 0xff 0x00 0x01 ...
Arguments:
output: Write test image to this file.
image_size: The size of the requested file in bytes.
start_byte: The integer value of the start byte to use for pattern
generation.
"""
pattern = bytearray([x & 0xFF for x in range(start_byte, start_byte + 256)])
buf = bytearray()
c = int(math.ceil(image_size / 256.0))
for _ in range(0, c):
buf.extend(pattern)
output.write(buf[0:image_size])
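  # Example (sketch, hypothetical file name): generate a 1 MiB image whose
  # bytes cycle 0x00..0xff:
  #   with open('test.img', 'wb') as f:
  #     Avb().generate_test_image(f, 1024 * 1024, 0)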
def extract_vbmeta_image(self, output, image_filename, padding_size):
"""Implements the 'extract_vbmeta_image' command.
Arguments:
output: Write vbmeta struct to this file.
image_filename: File to extract vbmeta data from (with a footer).
padding_size: If not 0, pads output so size is a multiple of the number.
Raises:
AvbError: If there's no footer in the image.
"""
image = ImageHandler(image_filename, read_only=True)
(footer, _, _, _) = self._parse_image(image)
if not footer:
raise AvbError('Given image does not have a footer.')
image.seek(footer.vbmeta_offset)
vbmeta_blob = image.read(footer.vbmeta_size)
output.write(vbmeta_blob)
if padding_size > 0:
padded_size = round_to_multiple(len(vbmeta_blob), padding_size)
padding_needed = padded_size - len(vbmeta_blob)
output.write(b'\0' * padding_needed)
def erase_footer(self, image_filename, keep_hashtree):
"""Implements the 'erase_footer' command.
Arguments:
image_filename: File to erase a footer from.
keep_hashtree: If True, keep the hashtree and FEC around.
Raises:
AvbError: If there's no footer in the image.
"""
image = ImageHandler(image_filename)
(footer, _, descriptors, _) = self._parse_image(image)
if not footer:
raise AvbError('Given image does not have a footer.')
new_image_size = None
if not keep_hashtree:
new_image_size = footer.original_image_size
else:
# If requested to keep the hashtree, search for a hashtree
# descriptor to figure out the location and size of the hashtree
# and FEC.
for desc in descriptors:
if isinstance(desc, AvbHashtreeDescriptor):
          # The hashtree immediately follows the main data, so the new size
          # is easily derived.
new_image_size = desc.tree_offset + desc.tree_size
# If the image has FEC codes, also keep those.
if desc.fec_offset > 0:
fec_end = desc.fec_offset + desc.fec_size
new_image_size = max(new_image_size, fec_end)
break
if not new_image_size:
raise AvbError('Requested to keep hashtree but no hashtree '
'descriptor was found.')
# And cut...
image.truncate(new_image_size)
def zero_hashtree(self, image_filename):
"""Implements the 'zero_hashtree' command.
Arguments:
image_filename: File to zero hashtree and FEC data from.
Raises:
AvbError: If there's no footer in the image.
"""
image = ImageHandler(image_filename)
(footer, _, descriptors, _) = self._parse_image(image)
if not footer:
raise AvbError('Given image does not have a footer.')
# Search for a hashtree descriptor to figure out the location and
# size of the hashtree and FEC.
ht_desc = None
for desc in descriptors:
if isinstance(desc, AvbHashtreeDescriptor):
ht_desc = desc
break
if not ht_desc:
raise AvbError('No hashtree descriptor was found.')
zero_ht_start_offset = ht_desc.tree_offset
zero_ht_num_bytes = ht_desc.tree_size
zero_fec_start_offset = None
zero_fec_num_bytes = 0
if ht_desc.fec_offset > 0:
if ht_desc.fec_offset != ht_desc.tree_offset + ht_desc.tree_size:
raise AvbError('Hash-tree and FEC data must be adjacent.')
zero_fec_start_offset = ht_desc.fec_offset
zero_fec_num_bytes = ht_desc.fec_size
zero_end_offset = (zero_ht_start_offset + zero_ht_num_bytes
+ zero_fec_num_bytes)
image.seek(zero_end_offset)
data = image.read(image.image_size - zero_end_offset)
# Write zeroes all over hashtree and FEC, except for the first eight bytes
    # where a magic marker - ZeRoHaSH - is placed. Place these markers at the
    # beginning of both the hashtree and the FEC data. (That way, in the
    # future we can add options to 'avbtool zero_hashtree' to zero out only
    # one of them.)
#
# Applications can use these markers to detect that the hashtree and/or
# FEC needs to be recomputed.
image.truncate(zero_ht_start_offset)
data_zeroed_firstblock = b'ZeRoHaSH' + b'\0' * (image.block_size - 8)
image.append_raw(data_zeroed_firstblock)
image.append_fill(b'\0\0\0\0', zero_ht_num_bytes - image.block_size)
if zero_fec_start_offset:
image.append_raw(data_zeroed_firstblock)
image.append_fill(b'\0\0\0\0', zero_fec_num_bytes - image.block_size)
image.append_raw(data)
def resize_image(self, image_filename, partition_size):
"""Implements the 'resize_image' command.
Arguments:
image_filename: File with footer to resize.
partition_size: The new size of the image.
Raises:
AvbError: If there's no footer in the image.
"""
image = ImageHandler(image_filename)
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
(footer, _, _, _) = self._parse_image(image)
if not footer:
raise AvbError('Given image does not have a footer.')
# The vbmeta blob is always at the end of the data so resizing an
# image amounts to just moving the footer around.
vbmeta_end_offset = footer.vbmeta_offset + footer.vbmeta_size
if vbmeta_end_offset % image.block_size != 0:
vbmeta_end_offset += image.block_size - (vbmeta_end_offset
% image.block_size)
if partition_size < vbmeta_end_offset + 1 * image.block_size:
raise AvbError('Requested size of {} is too small for an image '
'of size {}.'
.format(partition_size,
vbmeta_end_offset + 1 * image.block_size))
# Cut at the end of the vbmeta blob and insert a DONT_CARE chunk
# with enough bytes such that the final Footer block is at the end
# of partition_size.
image.truncate(vbmeta_end_offset)
image.append_dont_care(partition_size - vbmeta_end_offset -
1 * image.block_size)
# Just reuse the same footer - only difference is that we're
# writing it in a different place.
footer_blob = footer.encode()
footer_blob_with_padding = (b'\0' * (image.block_size - AvbFooter.SIZE) +
footer_blob)
image.append_raw(footer_blob_with_padding)
def set_ab_metadata(self, misc_image, slot_data):
"""Implements the 'set_ab_metadata' command.
The |slot_data| argument must be of the form 'A_priority:A_tries_remaining:
A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'.
Arguments:
misc_image: The misc image to write to.
      slot_data: Slot data as a string.
Raises:
AvbError: If slot data is malformed.
"""
tokens = slot_data.split(':')
if len(tokens) != 6:
raise AvbError('Malformed slot data "{}".'.format(slot_data))
a_priority = int(tokens[0])
a_tries_remaining = int(tokens[1])
a_success = int(tokens[2]) != 0
b_priority = int(tokens[3])
b_tries_remaining = int(tokens[4])
b_success = int(tokens[5]) != 0
ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC,
self.AB_MAGIC,
self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION,
a_priority, a_tries_remaining, a_success,
b_priority, b_tries_remaining, b_success)
# Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why.
crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff
ab_data = ab_data_no_crc + struct.pack('!I', crc_value)
misc_image.seek(self.AB_MISC_METADATA_OFFSET)
misc_image.write(ab_data)
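  # For example, the (hypothetical) slot_data '15:7:0:14:0:1' marks slot A as
  # priority 15 with 7 tries remaining and not yet successfully booted, and
  # slot B as priority 14 with 0 tries remaining and successfully booted.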
def info_image(self, image_filename, output):
"""Implements the 'info_image' command.
Arguments:
image_filename: Image file to get information from (file object).
output: Output file to write human-readable information to (file object).
"""
image = ImageHandler(image_filename, read_only=True)
o = output
(footer, header, descriptors, image_size) = self._parse_image(image)
# To show the SHA1 of the public key.
vbmeta_blob = self._load_vbmeta_blob(image)
key_offset = (header.SIZE +
header.authentication_data_block_size +
header.public_key_offset)
key_blob = vbmeta_blob[key_offset:key_offset + header.public_key_size]
if footer:
o.write('Footer version: {}.{}\n'.format(footer.version_major,
footer.version_minor))
o.write('Image size: {} bytes\n'.format(image_size))
o.write('Original image size: {} bytes\n'.format(
footer.original_image_size))
o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset))
o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size))
o.write('--\n')
(alg_name, _) = lookup_algorithm_by_type(header.algorithm_type)
o.write('Minimum libavb version: {}.{}{}\n'.format(
header.required_libavb_version_major,
header.required_libavb_version_minor,
' (Sparse)' if image.is_sparse else ''))
o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE))
o.write('Authentication Block: {} bytes\n'.format(
header.authentication_data_block_size))
o.write('Auxiliary Block: {} bytes\n'.format(
header.auxiliary_data_block_size))
if key_blob:
hexdig = hashlib.sha1(key_blob).hexdigest()
o.write('Public key (sha1): {}\n'.format(hexdig))
o.write('Algorithm: {}\n'.format(alg_name))
o.write('Rollback Index: {}\n'.format(header.rollback_index))
o.write('Flags: {}\n'.format(header.flags))
o.write('Rollback Index Location: {}\n'.format(
header.rollback_index_location))
o.write('Release String: \'{}\'\n'.format(header.release_string))
# Print descriptors.
num_printed = 0
o.write('Descriptors:\n')
for desc in descriptors:
desc.print_desc(o)
num_printed += 1
if num_printed == 0:
o.write(' (none)\n')
def verify_image(self, image_filename, key_path, expected_chain_partitions,
follow_chain_partitions, accept_zeroed_hashtree):
"""Implements the 'verify_image' command.
Arguments:
image_filename: Image file to get information from (file object).
key_path: None or check that embedded public key matches key at given
path.
expected_chain_partitions: List of chain partitions to check or None.
      follow_chain_partitions: If True, follow chain partitions even when they
        are not specified with the --expected_chain_partition option.
accept_zeroed_hashtree: If True, don't fail if hashtree or FEC data is
zeroed out.
Raises:
AvbError: If verification of the image fails.
"""
expected_chain_partitions_map = {}
if expected_chain_partitions:
for cp in expected_chain_partitions:
cp_tokens = cp.split(':')
if len(cp_tokens) != 3:
raise AvbError('Malformed chained partition "{}".'.format(cp))
partition_name = cp_tokens[0]
rollback_index_location = int(cp_tokens[1])
file_path = cp_tokens[2]
with open(file_path, 'rb') as f:
pk_blob = f.read()
expected_chain_partitions_map[partition_name] = (
rollback_index_location, pk_blob)
image_dir = os.path.dirname(image_filename)
image_ext = os.path.splitext(image_filename)[1]
key_blob = None
if key_path:
print('Verifying image {} using key at {}'.format(image_filename,
key_path))
key_blob = RSAPublicKey(key_path).encode()
else:
print('Verifying image {} using embedded public key'.format(
image_filename))
image = ImageHandler(image_filename, read_only=True)
(footer, header, descriptors, _) = self._parse_image(image)
offset = 0
if footer:
offset = footer.vbmeta_offset
image.seek(offset)
vbmeta_blob = image.read(header.SIZE
+ header.authentication_data_block_size
+ header.auxiliary_data_block_size)
alg_name, _ = lookup_algorithm_by_type(header.algorithm_type)
if not verify_vbmeta_signature(header, vbmeta_blob):
raise AvbError('Signature check failed for {} vbmeta struct {}'
.format(alg_name, image_filename))
if key_blob:
# The embedded public key is in the auxiliary block at an offset.
key_offset = AvbVBMetaHeader.SIZE
key_offset += header.authentication_data_block_size
key_offset += header.public_key_offset
key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset
+ header.public_key_size]
if key_blob != key_blob_in_vbmeta:
raise AvbError('Embedded public key does not match given key.')
if footer:
print('vbmeta: Successfully verified footer and {} vbmeta struct in {}'
.format(alg_name, image.filename))
else:
print('vbmeta: Successfully verified {} vbmeta struct in {}'
.format(alg_name, image.filename))
for desc in descriptors:
if (isinstance(desc, AvbChainPartitionDescriptor)
and follow_chain_partitions
and expected_chain_partitions_map.get(desc.partition_name) is None):
# In this case we're processing a chain descriptor but don't have a
        # --expected_chain_partition entry; however --follow_chain_partitions was
# specified so we shouldn't error out in desc.verify().
print('{}: Chained but ROLLBACK_SLOT (which is {}) '
'and KEY (which has sha1 {}) not specified'
.format(desc.partition_name, desc.rollback_index_location,
hashlib.sha1(desc.public_key).hexdigest()))
elif not desc.verify(image_dir, image_ext, expected_chain_partitions_map,
image, accept_zeroed_hashtree):
raise AvbError('Error verifying descriptor.')
# Honor --follow_chain_partitions - add '--' to make the output more
# readable.
if (isinstance(desc, AvbChainPartitionDescriptor)
and follow_chain_partitions):
print('--')
chained_image_filename = os.path.join(image_dir,
desc.partition_name + image_ext)
self.verify_image(chained_image_filename, key_path, None, False,
accept_zeroed_hashtree)
def print_partition_digests(self, image_filename, output, as_json):
"""Implements the 'print_partition_digests' command.
Arguments:
image_filename: Image file to get information from (file object).
output: Output file to write human-readable information to (file object).
as_json: If True, print information as JSON
Raises:
AvbError: If getting the partition digests from the image fails.
"""
image_dir = os.path.dirname(image_filename)
image_ext = os.path.splitext(image_filename)[1]
json_partitions = None
if as_json:
json_partitions = []
self._print_partition_digests(
image_filename, output, json_partitions, image_dir, image_ext)
if as_json:
output.write(json.dumps({'partitions': json_partitions}, indent=2))
def _print_partition_digests(self, image_filename, output, json_partitions,
image_dir, image_ext):
"""Helper for printing partitions.
Arguments:
image_filename: Image file to get information from (file object).
output: Output file to write human-readable information to (file object).
json_partitions: If not None, don't print to output, instead add partition
information to this list.
image_dir: The directory to use when looking for chained partition files.
image_ext: The extension to use for chained partition files.
Raises:
AvbError: If getting the partition digests from the image fails.
"""
image = ImageHandler(image_filename, read_only=True)
(_, _, descriptors, _) = self._parse_image(image)
for desc in descriptors:
if isinstance(desc, AvbHashDescriptor):
digest = desc.digest.hex()
if json_partitions is not None:
json_partitions.append({'name': desc.partition_name,
'digest': digest})
else:
output.write('{}: {}\n'.format(desc.partition_name, digest))
elif isinstance(desc, AvbHashtreeDescriptor):
digest = desc.root_digest.hex()
if json_partitions is not None:
json_partitions.append({'name': desc.partition_name,
'digest': digest})
else:
output.write('{}: {}\n'.format(desc.partition_name, digest))
elif isinstance(desc, AvbChainPartitionDescriptor):
chained_image_filename = os.path.join(image_dir,
desc.partition_name + image_ext)
self._print_partition_digests(
chained_image_filename, output, json_partitions, image_dir,
image_ext)
def calculate_vbmeta_digest(self, image_filename, hash_algorithm, output):
"""Implements the 'calculate_vbmeta_digest' command.
Arguments:
image_filename: Image file to get information from (file object).
hash_algorithm: Hash algorithm used.
output: Output file to write human-readable information to (file object).
"""
image_dir = os.path.dirname(image_filename)
image_ext = os.path.splitext(image_filename)[1]
image = ImageHandler(image_filename, read_only=True)
(footer, header, descriptors, _) = self._parse_image(image)
offset = 0
if footer:
offset = footer.vbmeta_offset
size = (header.SIZE + header.authentication_data_block_size +
header.auxiliary_data_block_size)
image.seek(offset)
vbmeta_blob = image.read(size)
hasher = hashlib.new(hash_algorithm)
hasher.update(vbmeta_blob)
for desc in descriptors:
if isinstance(desc, AvbChainPartitionDescriptor):
ch_image_filename = os.path.join(image_dir,
desc.partition_name + image_ext)
ch_image = ImageHandler(ch_image_filename, read_only=True)
(ch_footer, ch_header, _, _) = self._parse_image(ch_image)
ch_offset = 0
ch_size = (ch_header.SIZE + ch_header.authentication_data_block_size +
ch_header.auxiliary_data_block_size)
if ch_footer:
ch_offset = ch_footer.vbmeta_offset
ch_image.seek(ch_offset)
ch_vbmeta_blob = ch_image.read(ch_size)
hasher.update(ch_vbmeta_blob)
digest = hasher.digest()
output.write('{}\n'.format(digest.hex()))
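  # The digest printed above is computed over the top-level vbmeta struct
  # followed by the vbmeta struct of every chained partition, in descriptor
  # order.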
def calculate_kernel_cmdline(self, image_filename, hashtree_disabled, output):
"""Implements the 'calculate_kernel_cmdline' command.
Arguments:
image_filename: Image file to get information from (file object).
      hashtree_disabled: If True, return the cmdline for the case where the
        hashtree is disabled.
output: Output file to write human-readable information to (file object).
"""
image = ImageHandler(image_filename, read_only=True)
_, _, descriptors, _ = self._parse_image(image)
image_dir = os.path.dirname(image_filename)
image_ext = os.path.splitext(image_filename)[1]
cmdline_descriptors = []
for desc in descriptors:
if isinstance(desc, AvbChainPartitionDescriptor):
ch_image_filename = os.path.join(image_dir,
desc.partition_name + image_ext)
ch_image = ImageHandler(ch_image_filename, read_only=True)
_, _, ch_descriptors, _ = self._parse_image(ch_image)
for ch_desc in ch_descriptors:
if isinstance(ch_desc, AvbKernelCmdlineDescriptor):
cmdline_descriptors.append(ch_desc)
elif isinstance(desc, AvbKernelCmdlineDescriptor):
cmdline_descriptors.append(desc)
kernel_cmdline_snippets = []
for desc in cmdline_descriptors:
use_cmdline = True
if ((desc.flags &
AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED)
!= 0):
if hashtree_disabled:
use_cmdline = False
if (desc.flags &
AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) != 0:
if not hashtree_disabled:
use_cmdline = False
if use_cmdline:
kernel_cmdline_snippets.append(desc.kernel_cmdline)
output.write(' '.join(kernel_cmdline_snippets))
def _parse_image(self, image):
"""Gets information about an image.
The image can either be a vbmeta or an image with a footer.
Arguments:
image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.
Returns:
A tuple where the first argument is a AvbFooter (None if there
is no footer on the image), the second argument is a
AvbVBMetaHeader, the third argument is a list of
AvbDescriptor-derived instances, and the fourth argument is the
size of |image|.
Raises:
AvbError: In case the image cannot be parsed.
"""
assert isinstance(image, ImageHandler)
footer = None
image.seek(image.image_size - AvbFooter.SIZE)
try:
footer = AvbFooter(image.read(AvbFooter.SIZE))
except (LookupError, struct.error):
# Nope, just seek back to the start.
image.seek(0)
vbmeta_offset = 0
if footer:
vbmeta_offset = footer.vbmeta_offset
image.seek(vbmeta_offset)
h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE))
auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE
aux_block_offset = auth_block_offset + h.authentication_data_block_size
desc_start_offset = aux_block_offset + h.descriptors_offset
image.seek(desc_start_offset)
descriptors = parse_descriptors(image.read(h.descriptors_size))
return footer, h, descriptors, image.image_size
def _load_vbmeta_blob(self, image):
"""Gets the vbmeta struct and associated sections.
The image can either be a vbmeta.img or an image with a footer.
Arguments:
image: An ImageHandler (vbmeta or footer).
Returns:
A blob with the vbmeta struct and other sections.
"""
assert isinstance(image, ImageHandler)
footer = None
image.seek(image.image_size - AvbFooter.SIZE)
try:
footer = AvbFooter(image.read(AvbFooter.SIZE))
except (LookupError, struct.error):
# Nope, just seek back to the start.
image.seek(0)
vbmeta_offset = 0
if footer:
vbmeta_offset = footer.vbmeta_offset
image.seek(vbmeta_offset)
h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE))
image.seek(vbmeta_offset)
data_size = AvbVBMetaHeader.SIZE
data_size += h.authentication_data_block_size
data_size += h.auxiliary_data_block_size
return image.read(data_size)
def _get_cmdline_descriptors_for_hashtree_descriptor(self, ht):
"""Generate kernel cmdline descriptors for dm-verity.
Arguments:
      ht: An AvbHashtreeDescriptor.
Returns:
      A list with two AvbKernelCmdlineDescriptor instances carrying dm-verity
      kernel cmdline instructions: one for when the hashtree is not disabled
      and one for when it is disabled.
"""
c = 'dm="1 vroot none ro 1,'
c += '0' # start
c += ' {}'.format((ht.image_size // 512)) # size (# sectors)
c += ' verity {}'.format(ht.dm_verity_version) # type and version
c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # data_dev
c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # hash_dev
c += ' {}'.format(ht.data_block_size) # data_block
c += ' {}'.format(ht.hash_block_size) # hash_block
c += ' {}'.format(ht.image_size // ht.data_block_size) # #blocks
c += ' {}'.format(ht.image_size // ht.data_block_size) # hash_offset
c += ' {}'.format(ht.hash_algorithm) # hash_alg
c += ' {}'.format(ht.root_digest.hex()) # root_digest
c += ' {}'.format(ht.salt.hex()) # salt
if ht.fec_num_roots > 0:
c += ' 10' # number of optional args
c += ' $(ANDROID_VERITY_MODE)'
c += ' ignore_zero_blocks'
c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'
c += ' fec_roots {}'.format(ht.fec_num_roots)
# Note that fec_blocks is the size that FEC covers, *not* the
# size of the FEC data. Since we use FEC for everything up until
# the FEC data, it's the same as the offset.
c += ' fec_blocks {}'.format(ht.fec_offset // ht.data_block_size)
c += ' fec_start {}'.format(ht.fec_offset // ht.data_block_size)
else:
c += ' 2' # number of optional args
c += ' $(ANDROID_VERITY_MODE)'
c += ' ignore_zero_blocks'
c += '" root=/dev/dm-0'
# Now that we have the command-line, generate the descriptor.
desc = AvbKernelCmdlineDescriptor()
desc.kernel_cmdline = c
desc.flags = (
AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED)
# The descriptor for when hashtree verification is disabled is a lot
# simpler - we just set the root to the partition.
desc_no_ht = AvbKernelCmdlineDescriptor()
desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'
desc_no_ht.flags = (
AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED)
return [desc, desc_no_ht]
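  # The generated cmdline (without FEC) has the following general shape, with
  # the placeholders filled in from |ht| (sketch):
  #   dm="1 vroot none ro 1,0 <num_sectors> verity <version>
  #   PARTUUID=$(ANDROID_SYSTEM_PARTUUID) PARTUUID=$(ANDROID_SYSTEM_PARTUUID)
  #   <data_block_size> <hash_block_size> <num_blocks> <hash_start_block>
  #   <hash_alg> <root_digest> <salt> 2 $(ANDROID_VERITY_MODE)
  #   ignore_zero_blocks" root=/dev/dm-0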
def _get_cmdline_descriptors_for_dm_verity(self, image):
"""Generate kernel cmdline descriptors for dm-verity.
Arguments:
image: An ImageHandler (vbmeta or footer) with a hashtree descriptor.
Returns:
      A list with two AvbKernelCmdlineDescriptor instances carrying dm-verity
      kernel cmdline instructions: one for when the hashtree is not disabled
      and one for when it is disabled.
Raises:
AvbError: If |image| doesn't have a hashtree descriptor.
"""
(_, _, descriptors, _) = self._parse_image(image)
ht = None
for desc in descriptors:
if isinstance(desc, AvbHashtreeDescriptor):
ht = desc
break
if not ht:
raise AvbError('No hashtree descriptor in given image')
return self._get_cmdline_descriptors_for_hashtree_descriptor(ht)
def make_vbmeta_image(self, output, chain_partitions, algorithm_name,
key_path, public_key_metadata_path, rollback_index,
flags, rollback_index_location,
props, props_from_file, kernel_cmdlines,
setup_rootfs_from_kernel,
include_descriptors_from_image,
signing_helper,
signing_helper_with_files,
release_string,
append_to_release_string,
print_required_libavb_version,
padding_size):
"""Implements the 'make_vbmeta_image' command.
Arguments:
output: File to write the image to.
chain_partitions: List of partitions to chain or None.
algorithm_name: Name of algorithm to use.
key_path: Path to key to use or None.
public_key_metadata_path: Path to public key metadata or None.
rollback_index: The rollback index to use.
flags: Flags value to use in the image.
rollback_index_location: Location of the main vbmeta rollback index.
props: Properties to insert (list of strings of the form 'key:value').
props_from_file: Properties to insert (list of strings 'key:<path>').
kernel_cmdlines: Kernel cmdlines to insert (list of strings).
setup_rootfs_from_kernel: None or file to generate from.
include_descriptors_from_image: List of file objects with descriptors.
      signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
release_string: None or avbtool release string to use instead of default.
append_to_release_string: None or string to append.
print_required_libavb_version: True to only print required libavb version.
padding_size: If not 0, pads output so size is a multiple of the number.
Raises:
AvbError: If a chained partition is malformed.
"""
# If we're asked to calculate minimum required libavb version, we're done.
tmp_header = AvbVBMetaHeader()
if rollback_index_location > 0:
tmp_header.bump_required_libavb_version_minor(2)
if include_descriptors_from_image:
# Use the bump logic in AvbVBMetaHeader to calculate the max required
# version of all included descriptors.
for image in include_descriptors_from_image:
(_, image_header, _, _) = self._parse_image(ImageHandler(
image.name, read_only=True))
tmp_header.bump_required_libavb_version_minor(
image_header.required_libavb_version_minor)
if print_required_libavb_version:
print('1.{}'.format(tmp_header.required_libavb_version_minor))
return
if not output:
raise AvbError('No output file given')
descriptors = []
ht_desc_to_setup = None
vbmeta_blob = self._generate_vbmeta_blob(
algorithm_name, key_path, public_key_metadata_path, descriptors,
chain_partitions, rollback_index, flags, rollback_index_location,
props, props_from_file,
kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup,
include_descriptors_from_image, signing_helper,
signing_helper_with_files, release_string,
append_to_release_string, tmp_header.required_libavb_version_minor)
# Write entire vbmeta blob (header, authentication, auxiliary).
output.seek(0)
output.write(vbmeta_blob)
if padding_size > 0:
padded_size = round_to_multiple(len(vbmeta_blob), padding_size)
padding_needed = padded_size - len(vbmeta_blob)
output.write(b'\0' * padding_needed)
def _generate_vbmeta_blob(self, algorithm_name, key_path,
public_key_metadata_path, descriptors,
chain_partitions,
rollback_index, flags, rollback_index_location,
props, props_from_file,
kernel_cmdlines,
setup_rootfs_from_kernel,
ht_desc_to_setup,
include_descriptors_from_image, signing_helper,
signing_helper_with_files,
release_string, append_to_release_string,
required_libavb_version_minor):
"""Generates a VBMeta blob.
This blob contains the header (struct AvbVBMetaHeader), the
authentication data block (which contains the hash and signature
for the header and auxiliary block), and the auxiliary block
(which contains descriptors, the public key used, and other data).
    The |key_path| parameter can be None only if the |algorithm_name| is
'NONE'.
Arguments:
algorithm_name: The algorithm name as per the ALGORITHMS dict.
key_path: The path to the .pem file used to sign the blob.
public_key_metadata_path: Path to public key metadata or None.
descriptors: A list of descriptors to insert or None.
chain_partitions: List of partitions to chain or None.
rollback_index: The rollback index to use.
flags: Flags to use in the image.
rollback_index_location: Location of the main vbmeta rollback index.
props: Properties to insert (List of strings of the form 'key:value').
props_from_file: Properties to insert (List of strings 'key:<path>').
kernel_cmdlines: Kernel cmdlines to insert (list of strings).
setup_rootfs_from_kernel: None or file to generate
dm-verity kernel cmdline from.
ht_desc_to_setup: If not None, an AvbHashtreeDescriptor to
generate dm-verity kernel cmdline descriptors from.
include_descriptors_from_image: List of file objects for which
to insert descriptors from.
      signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
release_string: None or avbtool release string.
append_to_release_string: None or string to append.
required_libavb_version_minor: Use at least this required minor version.
Returns:
The VBMeta blob as bytes.
Raises:
Exception: If the |algorithm_name| is not found, if no key has
been given and the given algorithm requires one, or the key is
of the wrong size.
"""
try:
alg = ALGORITHMS[algorithm_name]
except KeyError:
raise AvbError('Unknown algorithm with name {}'.format(algorithm_name))
if not descriptors:
descriptors = []
h = AvbVBMetaHeader()
h.bump_required_libavb_version_minor(required_libavb_version_minor)
# Insert chained partition descriptors, if any
if chain_partitions:
used_locations = {rollback_index_location: True}
for cp in chain_partitions:
cp_tokens = cp.split(':')
if len(cp_tokens) != 3:
raise AvbError('Malformed chained partition "{}".'.format(cp))
partition_name = cp_tokens[0]
chained_rollback_index_location = int(cp_tokens[1])
file_path = cp_tokens[2]
# Check that the same rollback location isn't being used by
# multiple chained partitions.
if used_locations.get(chained_rollback_index_location):
raise AvbError('Rollback Index Location {} is already in use.'.format(
chained_rollback_index_location))
used_locations[chained_rollback_index_location] = True
desc = AvbChainPartitionDescriptor()
desc.partition_name = partition_name
desc.rollback_index_location = chained_rollback_index_location
if desc.rollback_index_location < 1:
raise AvbError('Rollback index location must be 1 or larger.')
with open(file_path, 'rb') as f:
desc.public_key = f.read()
descriptors.append(desc)
# Descriptors.
encoded_descriptors = bytearray()
for desc in descriptors:
encoded_descriptors.extend(desc.encode())
# Add properties.
if props:
for prop in props:
idx = prop.find(':')
if idx == -1:
raise AvbError('Malformed property "{}".'.format(prop))
# pylint: disable=redefined-variable-type
desc = AvbPropertyDescriptor()
desc.key = prop[0:idx]
desc.value = prop[(idx + 1):].encode('utf-8')
encoded_descriptors.extend(desc.encode())
if props_from_file:
for prop in props_from_file:
idx = prop.find(':')
if idx == -1:
raise AvbError('Malformed property "{}".'.format(prop))
desc = AvbPropertyDescriptor()
desc.key = prop[0:idx]
file_path = prop[(idx + 1):]
with open(file_path, 'rb') as f:
# pylint: disable=attribute-defined-outside-init
desc.value = f.read()
encoded_descriptors.extend(desc.encode())
# Add AvbKernelCmdline descriptor for dm-verity from an image, if requested.
if setup_rootfs_from_kernel:
image_handler = ImageHandler(
setup_rootfs_from_kernel.name)
cmdline_desc = self._get_cmdline_descriptors_for_dm_verity(image_handler)
encoded_descriptors.extend(cmdline_desc[0].encode())
encoded_descriptors.extend(cmdline_desc[1].encode())
# Add AvbKernelCmdline descriptor for dm-verity from desc, if requested.
if ht_desc_to_setup:
cmdline_desc = self._get_cmdline_descriptors_for_hashtree_descriptor(
ht_desc_to_setup)
encoded_descriptors.extend(cmdline_desc[0].encode())
encoded_descriptors.extend(cmdline_desc[1].encode())
# Add kernel command-lines.
if kernel_cmdlines:
for i in kernel_cmdlines:
desc = AvbKernelCmdlineDescriptor()
desc.kernel_cmdline = i
encoded_descriptors.extend(desc.encode())
# Add descriptors from other images.
if include_descriptors_from_image:
descriptors_dict = dict()
for image in include_descriptors_from_image:
image_handler = ImageHandler(image.name, read_only=True)
(_, image_vbmeta_header, image_descriptors, _) = self._parse_image(
image_handler)
# Bump the required libavb version to support all included descriptors.
h.bump_required_libavb_version_minor(
image_vbmeta_header.required_libavb_version_minor)
for desc in image_descriptors:
# The --include_descriptors_from_image option is used in some setups
# with images A and B where both A and B contain a descriptor
# for a partition with the same name. Since it's not meaningful
# to include both descriptors, only include the last seen descriptor.
# See bug 76386656 for details.
if hasattr(desc, 'partition_name'):
key = type(desc).__name__ + '_' + desc.partition_name
descriptors_dict[key] = desc.encode()
else:
encoded_descriptors.extend(desc.encode())
for key in sorted(descriptors_dict):
encoded_descriptors.extend(descriptors_dict[key])
# Load public key metadata blob, if requested.
pkmd_blob = b''
if public_key_metadata_path:
with open(public_key_metadata_path, 'rb') as f:
pkmd_blob = f.read()
key = None
encoded_key = b''
if alg.public_key_num_bytes > 0:
if not key_path:
raise AvbError('Key is required for algorithm {}'.format(
algorithm_name))
encoded_key = RSAPublicKey(key_path).encode()
if len(encoded_key) != alg.public_key_num_bytes:
raise AvbError('Key is wrong size for algorithm {}'.format(
algorithm_name))
# Override release string, if requested.
if isinstance(release_string, str):
h.release_string = release_string
# Append to release string, if requested. Also insert a space before.
if isinstance(append_to_release_string, str):
h.release_string += ' ' + append_to_release_string
# For the Auxiliary data block, descriptors are stored at offset 0,
# followed by the public key, followed by the public key metadata blob.
h.auxiliary_data_block_size = round_to_multiple(
len(encoded_descriptors) + len(encoded_key) + len(pkmd_blob), 64)
h.descriptors_offset = 0
h.descriptors_size = len(encoded_descriptors)
h.public_key_offset = h.descriptors_size
h.public_key_size = len(encoded_key)
h.public_key_metadata_offset = h.public_key_offset + h.public_key_size
h.public_key_metadata_size = len(pkmd_blob)
# For the Authentication data block, the hash is first and then
# the signature.
h.authentication_data_block_size = round_to_multiple(
alg.hash_num_bytes + alg.signature_num_bytes, 64)
h.algorithm_type = alg.algorithm_type
h.hash_offset = 0
h.hash_size = alg.hash_num_bytes
# Signature offset and size - it's stored right after the hash
# (in Authentication data block).
h.signature_offset = alg.hash_num_bytes
h.signature_size = alg.signature_num_bytes
h.rollback_index = rollback_index
h.flags = flags
h.rollback_index_location = rollback_index_location
# Generate Header data block.
header_data_blob = h.encode()
# Generate Auxiliary data block.
aux_data_blob = bytearray()
aux_data_blob.extend(encoded_descriptors)
aux_data_blob.extend(encoded_key)
aux_data_blob.extend(pkmd_blob)
padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob)
aux_data_blob.extend(b'\0' * padding_bytes)
# Calculate the hash.
binary_hash = b''
binary_signature = b''
if algorithm_name != 'NONE':
ha = hashlib.new(alg.hash_name)
ha.update(header_data_blob)
ha.update(aux_data_blob)
binary_hash = ha.digest()
# Calculate the signature.
rsa_key = RSAPublicKey(key_path)
data_to_sign = header_data_blob + bytes(aux_data_blob)
binary_signature = rsa_key.sign(algorithm_name, data_to_sign,
signing_helper, signing_helper_with_files)
# Generate Authentication data block.
auth_data_blob = bytearray()
auth_data_blob.extend(binary_hash)
auth_data_blob.extend(binary_signature)
padding_bytes = h.authentication_data_block_size - len(auth_data_blob)
auth_data_blob.extend(b'\0' * padding_bytes)
return header_data_blob + bytes(auth_data_blob) + bytes(aux_data_blob)
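  # Minimal sketch of how a blob returned by _generate_vbmeta_blob() can be
  # split back into its three parts using the header fields set above. This is
  # only a reading aid; 'blob' is a hypothetical bytes object and the sketch
  # assumes AvbVBMetaHeader accepts raw header bytes, as done elsewhere in
  # this tool.
  #
  #   hdr = AvbVBMetaHeader(blob[0:AvbVBMetaHeader.SIZE])
  #   auth_start = AvbVBMetaHeader.SIZE
  #   aux_start = auth_start + hdr.authentication_data_block_size
  #   auth_block = blob[auth_start:aux_start]
  #   aux_block = blob[aux_start:aux_start + hdr.auxiliary_data_block_size]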
def extract_public_key(self, key_path, output):
"""Implements the 'extract_public_key' command.
Arguments:
key_path: The path to a RSA private key file.
output: The file to write to.
Raises:
AvbError: If the public key could not be extracted.
"""
output.write(RSAPublicKey(key_path).encode())
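  # Illustrative command-line usage of extract_public_key() above; the file
  # names are hypothetical:
  #
  #   avbtool extract_public_key --key signing_key.pem --output key.avbpubkey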
def append_vbmeta_image(self, image_filename, vbmeta_image_filename,
partition_size):
"""Implementation of the append_vbmeta_image command.
Arguments:
image_filename: File to add the footer to.
vbmeta_image_filename: File to get vbmeta struct from.
partition_size: Size of partition.
Raises:
      AvbError: If an argument is incorrect or if appending the VBMeta
        image failed.
"""
image = ImageHandler(image_filename)
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
# If there's already a footer, truncate the image to its original
# size. This way 'avbtool append_vbmeta_image' is idempotent.
if image.image_size >= AvbFooter.SIZE:
image.seek(image.image_size - AvbFooter.SIZE)
try:
footer = AvbFooter(image.read(AvbFooter.SIZE))
# Existing footer found. Just truncate.
original_image_size = footer.original_image_size
image.truncate(footer.original_image_size)
except (LookupError, struct.error):
original_image_size = image.image_size
else:
# Image size is too small to possibly contain a footer.
original_image_size = image.image_size
# If anything goes wrong from here-on, restore the image back to
# its original size.
try:
vbmeta_image_handler = ImageHandler(vbmeta_image_filename)
vbmeta_blob = self._load_vbmeta_blob(vbmeta_image_handler)
# If the image isn't sparse, its size might not be a multiple of
# the block size. This will screw up padding later so just grow it.
if image.image_size % image.block_size != 0:
assert not image.is_sparse
padding_needed = image.block_size - (image.image_size%image.block_size)
image.truncate(image.image_size + padding_needed)
# The append_raw() method requires content with size being a
# multiple of |block_size| so add padding as needed. Also record
# where this is written to since we'll need to put that in the
# footer.
vbmeta_offset = image.image_size
padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
len(vbmeta_blob))
vbmeta_blob_with_padding = vbmeta_blob + b'\0' * padding_needed
# Append vbmeta blob and footer
image.append_raw(vbmeta_blob_with_padding)
vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding)
# Now insert a DONT_CARE chunk with enough bytes such that the
      # final Footer block is at the end of partition_size.
image.append_dont_care(partition_size - vbmeta_end_offset -
1 * image.block_size)
# Generate the Footer that tells where the VBMeta footer
# is. Also put enough padding in the front of the footer since
# we'll write out an entire block.
footer = AvbFooter()
footer.original_image_size = original_image_size
footer.vbmeta_offset = vbmeta_offset
footer.vbmeta_size = len(vbmeta_blob)
footer_blob = footer.encode()
footer_blob_with_padding = (b'\0' * (image.block_size - AvbFooter.SIZE) +
footer_blob)
image.append_raw(footer_blob_with_padding)
except Exception as e:
# Truncate back to original size, then re-raise.
image.truncate(original_image_size)
raise AvbError('Appending VBMeta image failed: {}.'.format(e))
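  # Illustrative command-line usage of append_vbmeta_image() above. File names
  # and the partition size are hypothetical; the partition size must be a
  # multiple of the image block size, as checked at the top of the method:
  #
  #   avbtool append_vbmeta_image \
  #       --image boot.img --vbmeta_image vbmeta.img --partition_size 33554432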
def add_hash_footer(self, image_filename, partition_size, partition_name,
hash_algorithm, salt, chain_partitions, algorithm_name,
key_path,
public_key_metadata_path, rollback_index, flags,
rollback_index_location, props,
props_from_file, kernel_cmdlines,
setup_rootfs_from_kernel,
include_descriptors_from_image, calc_max_image_size,
signing_helper, signing_helper_with_files,
release_string, append_to_release_string,
output_vbmeta_image, do_not_append_vbmeta_image,
print_required_libavb_version, use_persistent_digest,
do_not_use_ab):
"""Implementation of the add_hash_footer on unsparse images.
Arguments:
image_filename: File to add the footer to.
partition_size: Size of partition.
partition_name: Name of partition (without A/B suffix).
hash_algorithm: Hash algorithm to use.
salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
chain_partitions: List of partitions to chain.
algorithm_name: Name of algorithm to use.
key_path: Path to key to use or None.
public_key_metadata_path: Path to public key metadata or None.
rollback_index: Rollback index.
flags: Flags value to use in the image.
rollback_index_location: Location of the main vbmeta rollback index.
props: Properties to insert (List of strings of the form 'key:value').
props_from_file: Properties to insert (List of strings 'key:<path>').
kernel_cmdlines: Kernel cmdlines to insert (list of strings).
setup_rootfs_from_kernel: None or file to generate
dm-verity kernel cmdline from.
include_descriptors_from_image: List of file objects for which
to insert descriptors from.
calc_max_image_size: Don't store the footer - instead calculate the
maximum image size leaving enough room for metadata with the
given |partition_size|.
      signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
release_string: None or avbtool release string.
append_to_release_string: None or string to append.
output_vbmeta_image: If not None, also write vbmeta struct to this file.
do_not_append_vbmeta_image: If True, don't append vbmeta struct.
print_required_libavb_version: True to only print required libavb version.
use_persistent_digest: Use a persistent digest on device.
do_not_use_ab: This partition does not use A/B.
Raises:
      AvbError: If an argument is incorrect or if adding the hash footer
        failed.
"""
required_libavb_version_minor = 0
if use_persistent_digest or do_not_use_ab:
required_libavb_version_minor = 1
if rollback_index_location > 0:
required_libavb_version_minor = 2
# If we're asked to calculate minimum required libavb version, we're done.
if print_required_libavb_version:
print('1.{}'.format(required_libavb_version_minor))
return
# First, calculate the maximum image size such that an image
# this size + metadata (footer + vbmeta struct) fits in
# |partition_size|.
max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE
if partition_size < max_metadata_size:
      raise AvbError('Partition size of {} is too small. '
'Needs to be at least {}'.format(
partition_size, max_metadata_size))
max_image_size = partition_size - max_metadata_size
# If we're asked to only calculate the maximum image size, we're done.
if calc_max_image_size:
print('{}'.format(max_image_size))
return
image = ImageHandler(image_filename)
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
# If there's already a footer, truncate the image to its original
# size. This way 'avbtool add_hash_footer' is idempotent (modulo
# salts).
if image.image_size >= AvbFooter.SIZE:
image.seek(image.image_size - AvbFooter.SIZE)
try:
footer = AvbFooter(image.read(AvbFooter.SIZE))
# Existing footer found. Just truncate.
original_image_size = footer.original_image_size
image.truncate(footer.original_image_size)
except (LookupError, struct.error):
original_image_size = image.image_size
else:
# Image size is too small to possibly contain a footer.
original_image_size = image.image_size
# If anything goes wrong from here-on, restore the image back to
# its original size.
try:
# If image size exceeds the maximum image size, fail.
if image.image_size > max_image_size:
raise AvbError('Image size of {} exceeds maximum image '
'size of {} in order to fit in a partition '
'size of {}.'.format(image.image_size, max_image_size,
partition_size))
digest_size = len(hashlib.new(hash_algorithm).digest())
if salt:
salt = binascii.unhexlify(salt)
elif salt is None and not use_persistent_digest:
        # If salt is not explicitly specified, choose a random salt that is
        # the same size as the hash. Don't populate a random salt if this
# descriptor is being created to use a persistent digest on device.
hash_size = digest_size
with open('/dev/urandom', 'rb') as f:
salt = f.read(hash_size)
else:
salt = b''
hasher = hashlib.new(hash_algorithm, salt)
# TODO(zeuthen): might want to read this in chunks to avoid
# memory pressure, then again, this is only supposed to be used
# on kernel/initramfs partitions. Possible optimization.
image.seek(0)
hasher.update(image.read(image.image_size))
digest = hasher.digest()
h_desc = AvbHashDescriptor()
h_desc.image_size = image.image_size
h_desc.hash_algorithm = hash_algorithm
h_desc.partition_name = partition_name
h_desc.salt = salt
h_desc.flags = 0
if do_not_use_ab:
h_desc.flags |= 1 # AVB_HASH_DESCRIPTOR_FLAGS_DO_NOT_USE_AB
if not use_persistent_digest:
h_desc.digest = digest
# Generate the VBMeta footer.
ht_desc_to_setup = None
vbmeta_blob = self._generate_vbmeta_blob(
algorithm_name, key_path, public_key_metadata_path, [h_desc],
chain_partitions, rollback_index, flags, rollback_index_location,
props, props_from_file,
kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup,
include_descriptors_from_image, signing_helper,
signing_helper_with_files, release_string,
append_to_release_string, required_libavb_version_minor)
# Write vbmeta blob, if requested.
if output_vbmeta_image:
output_vbmeta_image.write(vbmeta_blob)
# Append vbmeta blob and footer, unless requested not to.
if not do_not_append_vbmeta_image:
# If the image isn't sparse, its size might not be a multiple of
# the block size. This will screw up padding later so just grow it.
if image.image_size % image.block_size != 0:
assert not image.is_sparse
padding_needed = image.block_size - (
image.image_size % image.block_size)
image.truncate(image.image_size + padding_needed)
# The append_raw() method requires content with size being a
# multiple of |block_size| so add padding as needed. Also record
# where this is written to since we'll need to put that in the
# footer.
vbmeta_offset = image.image_size
padding_needed = (
round_to_multiple(len(vbmeta_blob), image.block_size) -
len(vbmeta_blob))
vbmeta_blob_with_padding = vbmeta_blob + b'\0' * padding_needed
image.append_raw(vbmeta_blob_with_padding)
vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding)
# Now insert a DONT_CARE chunk with enough bytes such that the
        # final Footer block is at the end of partition_size.
image.append_dont_care(partition_size - vbmeta_end_offset -
1 * image.block_size)
# Generate the Footer that tells where the VBMeta footer
# is. Also put enough padding in the front of the footer since
# we'll write out an entire block.
footer = AvbFooter()
footer.original_image_size = original_image_size
footer.vbmeta_offset = vbmeta_offset
footer.vbmeta_size = len(vbmeta_blob)
footer_blob = footer.encode()
footer_blob_with_padding = (
b'\0' * (image.block_size - AvbFooter.SIZE) + footer_blob)
image.append_raw(footer_blob_with_padding)
except Exception as e:
# Truncate back to original size, then re-raise.
image.truncate(original_image_size)
raise AvbError('Adding hash_footer failed: {}.'.format(e))
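  # Illustrative command-line usage of add_hash_footer() above. File names,
  # key and partition size are hypothetical; --calc_max_image_size can be run
  # first to check how large the image may be for the given partition size:
  #
  #   avbtool add_hash_footer \
  #       --image boot.img --partition_name boot --partition_size 33554432 \
  #       --algorithm SHA256_RSA4096 --key signing_key.pem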
def add_hashtree_footer(self, image_filename, partition_size, partition_name,
generate_fec, fec_num_roots, hash_algorithm,
block_size, salt, chain_partitions, algorithm_name,
key_path,
public_key_metadata_path, rollback_index, flags,
rollback_index_location,
props, props_from_file, kernel_cmdlines,
setup_rootfs_from_kernel,
setup_as_rootfs_from_kernel,
include_descriptors_from_image,
calc_max_image_size, signing_helper,
signing_helper_with_files,
release_string, append_to_release_string,
output_vbmeta_image, do_not_append_vbmeta_image,
print_required_libavb_version,
use_persistent_root_digest, do_not_use_ab,
no_hashtree):
"""Implements the 'add_hashtree_footer' command.
See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for
more information about dm-verity and these hashes.
Arguments:
image_filename: File to add the footer to.
partition_size: Size of partition or 0 to put it right at the end.
partition_name: Name of partition (without A/B suffix).
generate_fec: If True, generate FEC codes.
fec_num_roots: Number of roots for FEC.
hash_algorithm: Hash algorithm to use.
block_size: Block size to use.
salt: Salt to use as a hexadecimal string or None to use /dev/urandom.
chain_partitions: List of partitions to chain.
algorithm_name: Name of algorithm to use.
key_path: Path to key to use or None.
public_key_metadata_path: Path to public key metadata or None.
rollback_index: Rollback index.
flags: Flags value to use in the image.
rollback_index_location: Location of the main vbmeta rollback index.
props: Properties to insert (List of strings of the form 'key:value').
props_from_file: Properties to insert (List of strings 'key:<path>').
kernel_cmdlines: Kernel cmdlines to insert (list of strings).
setup_rootfs_from_kernel: None or file to generate
dm-verity kernel cmdline from.
setup_as_rootfs_from_kernel: If True, generate dm-verity kernel
cmdline to set up rootfs.
include_descriptors_from_image: List of file objects for which
to insert descriptors from.
calc_max_image_size: Don't store the hashtree or footer - instead
calculate the maximum image size leaving enough room for hashtree
and metadata with the given |partition_size|.
      signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
release_string: None or avbtool release string.
append_to_release_string: None or string to append.
output_vbmeta_image: If not None, also write vbmeta struct to this file.
do_not_append_vbmeta_image: If True, don't append vbmeta struct.
print_required_libavb_version: True to only print required libavb version.
use_persistent_root_digest: Use a persistent root digest on device.
do_not_use_ab: The partition does not use A/B.
no_hashtree: Do not append hashtree. Set size in descriptor as zero.
Raises:
AvbError: If an argument is incorrect or adding the hashtree footer
failed.
"""
required_libavb_version_minor = 0
if use_persistent_root_digest or do_not_use_ab:
required_libavb_version_minor = 1
if rollback_index_location > 0:
required_libavb_version_minor = 2
# If we're asked to calculate minimum required libavb version, we're done.
if print_required_libavb_version:
print('1.{}'.format(required_libavb_version_minor))
return
digest_size = len(hashlib.new(hash_algorithm).digest())
digest_padding = round_to_pow2(digest_size) - digest_size
# If |partition_size| is given (e.g. not 0), calculate the maximum image
# size such that an image this size + the hashtree + metadata (footer +
# vbmeta struct) fits in |partition_size|. We use very conservative figures
# for metadata.
if partition_size > 0:
max_tree_size = 0
max_fec_size = 0
if not no_hashtree:
(_, max_tree_size) = calc_hash_level_offsets(
partition_size, block_size, digest_size + digest_padding)
if generate_fec:
max_fec_size = calc_fec_data_size(partition_size, fec_num_roots)
max_metadata_size = (max_fec_size + max_tree_size +
self.MAX_VBMETA_SIZE +
self.MAX_FOOTER_SIZE)
max_image_size = partition_size - max_metadata_size
else:
max_image_size = 0
# If we're asked to only calculate the maximum image size, we're done.
if calc_max_image_size:
print('{}'.format(max_image_size))
return
image = ImageHandler(image_filename)
if partition_size > 0:
if partition_size % image.block_size != 0:
raise AvbError('Partition size of {} is not a multiple of the image '
'block size {}.'.format(partition_size,
image.block_size))
elif image.image_size % image.block_size != 0:
raise AvbError('File size of {} is not a multiple of the image '
'block size {}.'.format(image.image_size,
image.block_size))
# If there's already a footer, truncate the image to its original
# size. This way 'avbtool add_hashtree_footer' is idempotent
# (modulo salts).
if image.image_size >= AvbFooter.SIZE:
image.seek(image.image_size - AvbFooter.SIZE)
try:
footer = AvbFooter(image.read(AvbFooter.SIZE))
# Existing footer found. Just truncate.
original_image_size = footer.original_image_size
image.truncate(footer.original_image_size)
except (LookupError, struct.error):
original_image_size = image.image_size
else:
# Image size is too small to possibly contain a footer.
original_image_size = image.image_size
# If anything goes wrong from here-on, restore the image back to
# its original size.
try:
# Ensure image is multiple of block_size.
rounded_image_size = round_to_multiple(image.image_size, block_size)
if rounded_image_size > image.image_size:
        image.append_raw(b'\0' * (rounded_image_size - image.image_size))
# If image size exceeds the maximum image size, fail.
if partition_size > 0:
if image.image_size > max_image_size:
raise AvbError('Image size of {} exceeds maximum image '
'size of {} in order to fit in a partition '
'size of {}.'.format(image.image_size, max_image_size,
partition_size))
if salt:
salt = binascii.unhexlify(salt)
elif salt is None and not use_persistent_root_digest:
        # If salt is not explicitly specified, choose a random salt that is
        # the same size as the hash. Don't populate a random salt if this
# descriptor is being created to use a persistent digest on device.
hash_size = digest_size
with open('/dev/urandom', 'rb') as f:
salt = f.read(hash_size)
else:
salt = b''
# Hashes are stored upside down so we need to calculate hash
# offsets in advance.
(hash_level_offsets, tree_size) = calc_hash_level_offsets(
image.image_size, block_size, digest_size + digest_padding)
# If the image isn't sparse, its size might not be a multiple of
# the block size. This will screw up padding later so just grow it.
if image.image_size % image.block_size != 0:
assert not image.is_sparse
padding_needed = image.block_size - (image.image_size%image.block_size)
image.truncate(image.image_size + padding_needed)
# Generate the tree and add padding as needed.
tree_offset = image.image_size
root_digest, hash_tree = generate_hash_tree(image, image.image_size,
block_size,
hash_algorithm, salt,
digest_padding,
hash_level_offsets,
tree_size)
# Generate HashtreeDescriptor with details about the tree we
# just generated.
if no_hashtree:
tree_size = 0
hash_tree = b''
ht_desc = AvbHashtreeDescriptor()
ht_desc.dm_verity_version = 1
ht_desc.image_size = image.image_size
ht_desc.tree_offset = tree_offset
ht_desc.tree_size = tree_size
ht_desc.data_block_size = block_size
ht_desc.hash_block_size = block_size
ht_desc.hash_algorithm = hash_algorithm
ht_desc.partition_name = partition_name
ht_desc.salt = salt
if do_not_use_ab:
ht_desc.flags |= 1 # AVB_HASHTREE_DESCRIPTOR_FLAGS_DO_NOT_USE_AB
if not use_persistent_root_digest:
ht_desc.root_digest = root_digest
# Write the hash tree
padding_needed = (round_to_multiple(len(hash_tree), image.block_size) -
len(hash_tree))
hash_tree_with_padding = hash_tree + b'\0' * padding_needed
image.append_raw(hash_tree_with_padding)
len_hashtree_and_fec = len(hash_tree_with_padding)
# Generate FEC codes, if requested.
if generate_fec:
if no_hashtree:
fec_data = b''
else:
fec_data = generate_fec_data(image_filename, fec_num_roots)
padding_needed = (round_to_multiple(len(fec_data), image.block_size) -
len(fec_data))
fec_data_with_padding = fec_data + b'\0' * padding_needed
fec_offset = image.image_size
image.append_raw(fec_data_with_padding)
len_hashtree_and_fec += len(fec_data_with_padding)
# Update the hashtree descriptor.
ht_desc.fec_num_roots = fec_num_roots
ht_desc.fec_offset = fec_offset
ht_desc.fec_size = len(fec_data)
ht_desc_to_setup = None
if setup_as_rootfs_from_kernel:
ht_desc_to_setup = ht_desc
# Generate the VBMeta footer and add padding as needed.
vbmeta_offset = tree_offset + len_hashtree_and_fec
vbmeta_blob = self._generate_vbmeta_blob(
algorithm_name, key_path, public_key_metadata_path, [ht_desc],
chain_partitions, rollback_index, flags, rollback_index_location,
props, props_from_file,
kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup,
include_descriptors_from_image, signing_helper,
signing_helper_with_files, release_string,
append_to_release_string, required_libavb_version_minor)
padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) -
len(vbmeta_blob))
vbmeta_blob_with_padding = vbmeta_blob + b'\0' * padding_needed
# Write vbmeta blob, if requested.
if output_vbmeta_image:
output_vbmeta_image.write(vbmeta_blob)
# Append vbmeta blob and footer, unless requested not to.
if not do_not_append_vbmeta_image:
image.append_raw(vbmeta_blob_with_padding)
# Now insert a DONT_CARE chunk with enough bytes such that the
        # final Footer block is at the end of partition_size.
if partition_size > 0:
image.append_dont_care(partition_size - image.image_size -
1 * image.block_size)
# Generate the Footer that tells where the VBMeta footer
# is. Also put enough padding in the front of the footer since
# we'll write out an entire block.
footer = AvbFooter()
footer.original_image_size = original_image_size
footer.vbmeta_offset = vbmeta_offset
footer.vbmeta_size = len(vbmeta_blob)
footer_blob = footer.encode()
footer_blob_with_padding = (
b'\0' * (image.block_size - AvbFooter.SIZE) + footer_blob)
image.append_raw(footer_blob_with_padding)
except Exception as e:
# Truncate back to original size, then re-raise.
image.truncate(original_image_size)
raise AvbError('Adding hashtree_footer failed: {}.'.format(e))
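  # Illustrative command-line usage of add_hashtree_footer() above. File
  # names, key and partition size are hypothetical; FEC is generated by
  # default and can be disabled with --do_not_generate_fec:
  #
  #   avbtool add_hashtree_footer \
  #       --image system.img --partition_name system \
  #       --partition_size 1073741824 --hash_algorithm sha256 \
  #       --algorithm SHA256_RSA4096 --key signing_key.pem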
def make_atx_certificate(self, output, authority_key_path, subject_key_path,
subject_key_version, subject,
is_intermediate_authority, usage, signing_helper,
signing_helper_with_files):
"""Implements the 'make_atx_certificate' command.
Android Things certificates are required for Android Things public key
metadata. They chain the vbmeta signing key for a particular product back to
a fused, permanent root key. These certificates are fixed-length and fixed-
format with the explicit goal of not parsing ASN.1 in bootloader code.
Arguments:
output: Certificate will be written to this file on success.
authority_key_path: A PEM file path with the authority private key.
If None, then a certificate will be created without a
signature. The signature can be created out-of-band
and appended.
subject_key_path: Path to a PEM or DER subject public key.
subject_key_version: A 64-bit version value. If this is None, the number
of seconds since the epoch is used.
subject: A subject identifier. For Product Signing Key certificates this
should be the same Product ID found in the permanent attributes.
is_intermediate_authority: True if the certificate is for an intermediate
authority.
usage: If not empty, overrides the cert usage with a hash of this value.
signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
Raises:
      AvbError: If there is an error during signing.
"""
signed_data = bytearray()
signed_data.extend(struct.pack('<I', 1)) # Format Version
signed_data.extend(RSAPublicKey(subject_key_path).encode())
hasher = hashlib.sha256()
hasher.update(subject)
signed_data.extend(hasher.digest())
if not usage:
usage = 'com.google.android.things.vboot'
if is_intermediate_authority:
usage += '.ca'
hasher = hashlib.sha256()
hasher.update(usage.encode('ascii'))
signed_data.extend(hasher.digest())
if subject_key_version is None:
subject_key_version = int(time.time())
signed_data.extend(struct.pack('<Q', subject_key_version))
signature = b''
if authority_key_path:
rsa_key = RSAPublicKey(authority_key_path)
algorithm_name = 'SHA512_RSA4096'
signature = rsa_key.sign(algorithm_name, signed_data, signing_helper,
signing_helper_with_files)
output.write(signed_data)
output.write(signature)
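  # Byte layout of the certificate written above, in the order the fields are
  # appended to |signed_data| (a reading aid only):
  #
  #   4 bytes   format version (little-endian uint32, currently 1)
  #   N bytes   encoded subject public key (RSAPublicKey.encode())
  #   32 bytes  SHA-256 of |subject|
  #   32 bytes  SHA-256 of |usage|
  #   8 bytes   subject key version (little-endian uint64)
  #   M bytes   RSA signature over the fields above (empty if unsigned)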
def make_atx_permanent_attributes(self, output, root_authority_key_path,
product_id):
"""Implements the 'make_atx_permanent_attributes' command.
Android Things permanent attributes are designed to be permanent for a
particular product and a hash of these attributes should be fused into
hardware to enforce this.
Arguments:
output: Attributes will be written to this file on success.
root_authority_key_path: Path to a PEM or DER public key for
the root authority.
product_id: A 16-byte Product ID.
Raises:
AvbError: If an argument is incorrect.
"""
EXPECTED_PRODUCT_ID_SIZE = 16 # pylint: disable=invalid-name
if len(product_id) != EXPECTED_PRODUCT_ID_SIZE:
raise AvbError('Invalid Product ID length.')
output.write(struct.pack('<I', 1)) # Format Version
output.write(RSAPublicKey(root_authority_key_path).encode())
output.write(product_id)
def make_atx_metadata(self, output, intermediate_key_certificate,
product_key_certificate):
"""Implements the 'make_atx_metadata' command.
Android Things metadata are included in vbmeta images to facilitate
verification. The output of this command can be used as the
public_key_metadata argument to other commands.
Arguments:
output: Metadata will be written to this file on success.
intermediate_key_certificate: A certificate file as output by
make_atx_certificate with
is_intermediate_authority set to true.
product_key_certificate: A certificate file as output by
make_atx_certificate with
is_intermediate_authority set to false.
Raises:
AvbError: If an argument is incorrect.
"""
EXPECTED_CERTIFICATE_SIZE = 1620 # pylint: disable=invalid-name
if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
raise AvbError('Invalid intermediate key certificate length.')
if len(product_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
raise AvbError('Invalid product key certificate length.')
output.write(struct.pack('<I', 1)) # Format Version
output.write(intermediate_key_certificate)
output.write(product_key_certificate)
def make_atx_unlock_credential(self, output, intermediate_key_certificate,
unlock_key_certificate, challenge_path,
unlock_key_path, signing_helper,
signing_helper_with_files):
"""Implements the 'make_atx_unlock_credential' command.
Android Things unlock credentials can be used to authorize the unlock of AVB
on a device. These credentials are presented to an Android Things bootloader
via the fastboot interface in response to a 16-byte challenge. This method
creates all fields of the credential except the challenge signature field
(which is the last field) and can optionally create the challenge signature
field as well if a challenge and the unlock_key_path is provided.
Arguments:
output: The credential will be written to this file on success.
intermediate_key_certificate: A certificate file as output by
make_atx_certificate with
is_intermediate_authority set to true.
unlock_key_certificate: A certificate file as output by
make_atx_certificate with
is_intermediate_authority set to false and the
usage set to
'com.google.android.things.vboot.unlock'.
challenge_path: [optional] A path to the challenge to sign.
unlock_key_path: [optional] A PEM file path with the unlock private key.
signing_helper: Program which signs a hash and returns the signature.
signing_helper_with_files: Same as signing_helper but uses files instead.
Raises:
AvbError: If an argument is incorrect or an error occurs during signing.
"""
EXPECTED_CERTIFICATE_SIZE = 1620 # pylint: disable=invalid-name
EXPECTED_CHALLENGE_SIZE = 16 # pylint: disable=invalid-name
if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
raise AvbError('Invalid intermediate key certificate length.')
if len(unlock_key_certificate) != EXPECTED_CERTIFICATE_SIZE:
      raise AvbError('Invalid unlock key certificate length.')
challenge = b''
if challenge_path:
with open(challenge_path, 'rb') as f:
challenge = f.read()
if len(challenge) != EXPECTED_CHALLENGE_SIZE:
raise AvbError('Invalid unlock challenge length.')
output.write(struct.pack('<I', 1)) # Format Version
output.write(intermediate_key_certificate)
output.write(unlock_key_certificate)
if challenge_path and unlock_key_path:
rsa_key = RSAPublicKey(unlock_key_path)
algorithm_name = 'SHA512_RSA4096'
signature = rsa_key.sign(algorithm_name, challenge, signing_helper,
signing_helper_with_files)
output.write(signature)
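  # Byte layout of the unlock credential written above (a reading aid only):
  #
  #   4 bytes     format version (little-endian uint32, currently 1)
  #   1620 bytes  intermediate key certificate
  #   1620 bytes  unlock key certificate
  #   M bytes     signature over the 16-byte challenge (present only when a
  #               challenge and an unlock key are supplied)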
def calc_hash_level_offsets(image_size, block_size, digest_size):
"""Calculate the offsets of all the hash-levels in a Merkle-tree.
Arguments:
image_size: The size of the image to calculate a Merkle-tree for.
block_size: The block size, e.g. 4096.
digest_size: The size of each hash, e.g. 32 for SHA-256.
Returns:
A tuple where the first argument is an array of offsets and the
second is size of the tree, in bytes.
"""
level_offsets = []
level_sizes = []
tree_size = 0
num_levels = 0
size = image_size
while size > block_size:
num_blocks = (size + block_size - 1) // block_size
level_size = round_to_multiple(num_blocks * digest_size, block_size)
level_sizes.append(level_size)
tree_size += level_size
num_levels += 1
size = level_size
for n in range(0, num_levels):
offset = 0
for m in range(n + 1, num_levels):
offset += level_sizes[m]
level_offsets.append(offset)
return level_offsets, tree_size
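# Worked example for calc_hash_level_offsets(); the numbers follow directly
# from the loop above. For a 1 MiB image, 4096-byte blocks and 32-byte
# digests, level 0 (the leaf level) needs 256 digests = 8192 bytes and
# level 1 needs 2 digests rounded up to one 4096-byte block:
#
#   calc_hash_level_offsets(1024 * 1024, 4096, 32) == ([4096, 0], 12288)
#
# i.e. the top level is stored at offset 0 and the leaf level after it, which
# matches the "hashes are stored upside down" note in add_hashtree_footer().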
# See system/extras/libfec/include/fec/io.h for these definitions.
FEC_FOOTER_FORMAT = '<LLLLLQ32s'
FEC_MAGIC = 0xfecfecfe
def calc_fec_data_size(image_size, num_roots):
"""Calculates how much space FEC data will take.
Arguments:
image_size: The size of the image.
num_roots: Number of roots.
Returns:
The number of bytes needed for FEC for an image of the given size
and with the requested number of FEC roots.
Raises:
ValueError: If output from the 'fec' tool is invalid.
"""
p = subprocess.Popen(
['fec', '--print-fec-size', str(image_size), '--roots', str(num_roots)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pout, perr) = p.communicate()
retcode = p.wait()
if retcode != 0:
raise ValueError('Error invoking fec: {}'.format(perr))
return int(pout)
def generate_fec_data(image_filename, num_roots):
"""Generate FEC codes for an image.
Arguments:
image_filename: The filename of the image.
num_roots: Number of roots.
Returns:
The FEC data blob as bytes.
Raises:
ValueError: If calling the 'fec' tool failed or the output is invalid.
"""
with tempfile.NamedTemporaryFile() as fec_tmpfile:
try:
subprocess.check_call(
['fec', '--encode', '--roots', str(num_roots), image_filename,
fec_tmpfile.name],
stderr=open(os.devnull, 'wb'))
except subprocess.CalledProcessError as e:
raise ValueError('Execution of \'fec\' tool failed: {}.'.format(e))
fec_data = fec_tmpfile.read()
footer_size = struct.calcsize(FEC_FOOTER_FORMAT)
footer_data = fec_data[-footer_size:]
(magic, _, _, num_roots, fec_size, _, _) = struct.unpack(FEC_FOOTER_FORMAT,
footer_data)
if magic != FEC_MAGIC:
raise ValueError('Unexpected magic in FEC footer')
return fec_data[0:fec_size]
def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt,
digest_padding, hash_level_offsets, tree_size):
"""Generates a Merkle-tree for a file.
Arguments:
image: The image, as a file.
image_size: The size of the image.
block_size: The block size, e.g. 4096.
hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'.
salt: The salt to use.
digest_padding: The padding for each digest.
hash_level_offsets: The offsets from calc_hash_level_offsets().
tree_size: The size of the tree, in number of bytes.
Returns:
A tuple where the first element is the top-level hash as bytes and the
second element is the hash-tree as bytes.
"""
hash_ret = bytearray(tree_size)
hash_src_offset = 0
hash_src_size = image_size
level_num = 0
while hash_src_size > block_size:
level_output_list = []
remaining = hash_src_size
while remaining > 0:
hasher = hashlib.new(hash_alg_name, salt)
# Only read from the file for the first level - for subsequent
# levels, access the array we're building.
if level_num == 0:
image.seek(hash_src_offset + hash_src_size - remaining)
data = image.read(min(remaining, block_size))
else:
offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining
data = hash_ret[offset:offset + block_size]
hasher.update(data)
remaining -= len(data)
if len(data) < block_size:
hasher.update(b'\0' * (block_size - len(data)))
level_output_list.append(hasher.digest())
if digest_padding > 0:
level_output_list.append(b'\0' * digest_padding)
level_output = b''.join(level_output_list)
padding_needed = (round_to_multiple(
len(level_output), block_size) - len(level_output))
level_output += b'\0' * padding_needed
# Copy level-output into resulting tree.
offset = hash_level_offsets[level_num]
hash_ret[offset:offset + len(level_output)] = level_output
# Continue on to the next level.
hash_src_size = len(level_output)
level_num += 1
hasher = hashlib.new(hash_alg_name, salt)
hasher.update(level_output)
return hasher.digest(), bytes(hash_ret)
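# Minimal sketch of how the two helpers above fit together, mirroring the
# calls made in add_hashtree_footer(); 'image' is a hypothetical ImageHandler
# and digest_padding is zero because 32 is already a power of two:
#
#   offsets, tree_size = calc_hash_level_offsets(image.image_size, 4096, 32)
#   root_digest, tree = generate_hash_tree(image, image.image_size, 4096,
#                                          'sha256', salt, 0, offsets,
#                                          tree_size)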
class AvbTool(object):
"""Object for avbtool command-line tool."""
def __init__(self):
"""Initializer method."""
self.avb = Avb()
def _add_common_args(self, sub_parser):
"""Adds arguments used by several sub-commands.
Arguments:
sub_parser: The parser to add arguments to.
"""
sub_parser.add_argument('--algorithm',
help='Algorithm to use (default: NONE)',
metavar='ALGORITHM',
default='NONE')
sub_parser.add_argument('--key',
help='Path to RSA private key file',
metavar='KEY',
required=False)
sub_parser.add_argument('--signing_helper',
help='Path to helper used for signing',
metavar='APP',
default=None,
required=False)
sub_parser.add_argument('--signing_helper_with_files',
help='Path to helper used for signing using files',
metavar='APP',
default=None,
required=False)
sub_parser.add_argument('--public_key_metadata',
help='Path to public key metadata file',
metavar='KEY_METADATA',
required=False)
sub_parser.add_argument('--rollback_index',
help='Rollback Index',
type=parse_number,
default=0)
sub_parser.add_argument('--rollback_index_location',
help='Location of main vbmeta Rollback Index',
type=parse_number,
default=0)
# This is used internally for unit tests. Do not include in --help output.
sub_parser.add_argument('--internal_release_string',
help=argparse.SUPPRESS)
sub_parser.add_argument('--append_to_release_string',
help='Text to append to release string',
metavar='STR')
sub_parser.add_argument('--prop',
help='Add property',
metavar='KEY:VALUE',
action='append')
sub_parser.add_argument('--prop_from_file',
help='Add property from file',
metavar='KEY:PATH',
action='append')
sub_parser.add_argument('--kernel_cmdline',
help='Add kernel cmdline',
metavar='CMDLINE',
action='append')
# TODO(zeuthen): the --setup_rootfs_from_kernel option used to be called
# --generate_dm_verity_cmdline_from_hashtree. Remove support for the latter
# at some future point.
sub_parser.add_argument('--setup_rootfs_from_kernel',
'--generate_dm_verity_cmdline_from_hashtree',
metavar='IMAGE',
help='Adds kernel cmdline to set up IMAGE',
type=argparse.FileType('rb'))
sub_parser.add_argument('--include_descriptors_from_image',
help='Include descriptors from image',
metavar='IMAGE',
action='append',
type=argparse.FileType('rb'))
sub_parser.add_argument('--print_required_libavb_version',
help=('Don\'t store the footer - '
'instead calculate the required libavb '
'version for the given options.'),
action='store_true')
# These are only allowed from top-level vbmeta and boot-in-lieu-of-vbmeta.
sub_parser.add_argument('--chain_partition',
help='Allow signed integrity-data for partition',
metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
action='append')
sub_parser.add_argument('--flags',
help='VBMeta flags',
type=parse_number,
default=0)
sub_parser.add_argument('--set_hashtree_disabled_flag',
help='Set the HASHTREE_DISABLED flag',
action='store_true')
def _add_common_footer_args(self, sub_parser):
"""Adds arguments used by add_*_footer sub-commands.
Arguments:
sub_parser: The parser to add arguments to.
"""
sub_parser.add_argument('--use_persistent_digest',
help='Use a persistent digest on device instead of '
'storing the digest in the descriptor. This '
'cannot be used with A/B so must be combined '
'with --do_not_use_ab when an A/B suffix is '
'expected at runtime.',
action='store_true')
sub_parser.add_argument('--do_not_use_ab',
help='The partition does not use A/B even when an '
'A/B suffix is present. This must not be used '
'for vbmeta or chained partitions.',
action='store_true')
def _fixup_common_args(self, args):
"""Common fixups needed by subcommands.
Arguments:
args: Arguments to modify.
Returns:
The modified arguments.
"""
if args.set_hashtree_disabled_flag:
args.flags |= AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED
return args
def run(self, argv):
"""Command-line processor.
Arguments:
argv: Pass sys.argv from main.
"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='subcommands')
sub_parser = subparsers.add_parser(
'generate_test_image',
help=('Generates a test image with a known pattern for testing: '
'0x00 0x01 0x02 ... 0xff 0x00 0x01 ...'))
sub_parser.add_argument('--image_size',
help='Size of image to generate.',
type=parse_number,
required=True)
sub_parser.add_argument('--start_byte',
help='Integer for the start byte of the pattern.',
type=parse_number,
default=0)
sub_parser.add_argument('--output',
help='Output file name.',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.set_defaults(func=self.generate_test_image)
sub_parser = subparsers.add_parser('version',
help='Prints version of avbtool.')
sub_parser.set_defaults(func=self.version)
sub_parser = subparsers.add_parser('extract_public_key',
help='Extract public key.')
sub_parser.add_argument('--key',
help='Path to RSA private key file',
required=True)
sub_parser.add_argument('--output',
help='Output file name',
type=argparse.FileType('wb'),
required=True)
sub_parser.set_defaults(func=self.extract_public_key)
sub_parser = subparsers.add_parser('make_vbmeta_image',
help='Makes a vbmeta image.')
sub_parser.add_argument('--output',
help='Output file name',
type=argparse.FileType('wb'))
sub_parser.add_argument('--padding_size',
metavar='NUMBER',
help='If non-zero, pads output with NUL bytes so '
'its size is a multiple of NUMBER '
'(default: 0)',
type=parse_number,
default=0)
self._add_common_args(sub_parser)
sub_parser.set_defaults(func=self.make_vbmeta_image)
sub_parser = subparsers.add_parser('add_hash_footer',
help='Add hashes and footer to image.')
sub_parser.add_argument('--image',
help='Image to add hashes to',
type=argparse.FileType('rb+'))
sub_parser.add_argument('--partition_size',
help='Partition size',
type=parse_number)
sub_parser.add_argument('--partition_name',
help='Partition name',
default=None)
sub_parser.add_argument('--hash_algorithm',
help='Hash algorithm to use (default: sha256)',
default='sha256')
sub_parser.add_argument('--salt',
help='Salt in hex (default: /dev/urandom)')
sub_parser.add_argument('--calc_max_image_size',
help=('Don\'t store the footer - '
'instead calculate the maximum image size '
'leaving enough room for metadata with '
'the given partition size.'),
action='store_true')
sub_parser.add_argument('--output_vbmeta_image',
help='Also write vbmeta struct to file',
type=argparse.FileType('wb'))
sub_parser.add_argument('--do_not_append_vbmeta_image',
help=('Do not append vbmeta struct or footer '
'to the image'),
action='store_true')
self._add_common_args(sub_parser)
self._add_common_footer_args(sub_parser)
sub_parser.set_defaults(func=self.add_hash_footer)
sub_parser = subparsers.add_parser('append_vbmeta_image',
help='Append vbmeta image to image.')
sub_parser.add_argument('--image',
help='Image to append vbmeta blob to',
type=argparse.FileType('rb+'))
sub_parser.add_argument('--partition_size',
help='Partition size',
type=parse_number,
required=True)
sub_parser.add_argument('--vbmeta_image',
help='Image with vbmeta blob to append',
type=argparse.FileType('rb'))
sub_parser.set_defaults(func=self.append_vbmeta_image)
sub_parser = subparsers.add_parser(
'add_hashtree_footer',
help='Add hashtree and footer to image.')
sub_parser.add_argument('--image',
help='Image to add hashtree to',
type=argparse.FileType('rb+'))
sub_parser.add_argument('--partition_size',
help='Partition size',
default=0,
type=parse_number)
sub_parser.add_argument('--partition_name',
help='Partition name',
default='')
sub_parser.add_argument('--hash_algorithm',
help='Hash algorithm to use (default: sha1)',
default='sha1')
sub_parser.add_argument('--salt',
help='Salt in hex (default: /dev/urandom)')
sub_parser.add_argument('--block_size',
help='Block size (default: 4096)',
type=parse_number,
default=4096)
# TODO(zeuthen): The --generate_fec option was removed when we
# moved to generating FEC by default. To avoid breaking existing
# users needing to transition we simply just print a warning below
# in add_hashtree_footer(). Remove this option and the warning at
# some point in the future.
sub_parser.add_argument('--generate_fec',
help=argparse.SUPPRESS,
action='store_true')
sub_parser.add_argument(
'--do_not_generate_fec',
help='Do not generate forward-error-correction codes',
action='store_true')
sub_parser.add_argument('--fec_num_roots',
help='Number of roots for FEC (default: 2)',
type=parse_number,
default=2)
sub_parser.add_argument('--calc_max_image_size',
help=('Don\'t store the hashtree or footer - '
'instead calculate the maximum image size '
'leaving enough room for hashtree '
'and metadata with the given partition '
'size.'),
action='store_true')
sub_parser.add_argument('--output_vbmeta_image',
help='Also write vbmeta struct to file',
type=argparse.FileType('wb'))
sub_parser.add_argument('--do_not_append_vbmeta_image',
help=('Do not append vbmeta struct or footer '
'to the image'),
action='store_true')
# This is different from --setup_rootfs_from_kernel insofar that
# it doesn't take an IMAGE, the generated cmdline will be for the
# hashtree we're adding.
sub_parser.add_argument('--setup_as_rootfs_from_kernel',
action='store_true',
help='Adds kernel cmdline for setting up rootfs')
sub_parser.add_argument('--no_hashtree',
action='store_true',
help='Do not append hashtree')
self._add_common_args(sub_parser)
self._add_common_footer_args(sub_parser)
sub_parser.set_defaults(func=self.add_hashtree_footer)
sub_parser = subparsers.add_parser('erase_footer',
help='Erase footer from an image.')
sub_parser.add_argument('--image',
help='Image with a footer',
type=argparse.FileType('rb+'),
required=True)
sub_parser.add_argument('--keep_hashtree',
help='Keep the hashtree and FEC in the image',
action='store_true')
sub_parser.set_defaults(func=self.erase_footer)
sub_parser = subparsers.add_parser('zero_hashtree',
help='Zero out hashtree and FEC data.')
sub_parser.add_argument('--image',
help='Image with a footer',
type=argparse.FileType('rb+'),
required=True)
sub_parser.set_defaults(func=self.zero_hashtree)
sub_parser = subparsers.add_parser(
'extract_vbmeta_image',
help='Extracts vbmeta from an image with a footer.')
sub_parser.add_argument('--image',
help='Image with footer',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--output',
help='Output file name',
type=argparse.FileType('wb'))
sub_parser.add_argument('--padding_size',
metavar='NUMBER',
help='If non-zero, pads output with NUL bytes so '
'its size is a multiple of NUMBER '
'(default: 0)',
type=parse_number,
default=0)
sub_parser.set_defaults(func=self.extract_vbmeta_image)
sub_parser = subparsers.add_parser('resize_image',
help='Resize image with a footer.')
sub_parser.add_argument('--image',
help='Image with a footer',
type=argparse.FileType('rb+'),
required=True)
sub_parser.add_argument('--partition_size',
help='New partition size',
type=parse_number)
sub_parser.set_defaults(func=self.resize_image)
sub_parser = subparsers.add_parser(
'info_image',
help='Show information about vbmeta or footer.')
sub_parser.add_argument('--image',
help='Image to show information about',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--output',
help='Write info to file',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.set_defaults(func=self.info_image)
sub_parser = subparsers.add_parser(
'verify_image',
help='Verify an image.')
sub_parser.add_argument('--image',
help='Image to verify',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--key',
help='Check embedded public key matches KEY',
metavar='KEY',
required=False)
sub_parser.add_argument('--expected_chain_partition',
help='Expected chain partition',
metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH',
action='append')
sub_parser.add_argument(
'--follow_chain_partitions',
help=('Follows chain partitions even when not '
'specified with the --expected_chain_partition option'),
action='store_true')
sub_parser.add_argument(
'--accept_zeroed_hashtree',
help=('Accept images where the hashtree or FEC data is zeroed out'),
action='store_true')
sub_parser.set_defaults(func=self.verify_image)
sub_parser = subparsers.add_parser(
'print_partition_digests',
help='Prints partition digests.')
sub_parser.add_argument('--image',
help='Image to print partition digests from',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--output',
help='Write info to file',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.add_argument('--json',
help=('Print output as JSON'),
action='store_true')
sub_parser.set_defaults(func=self.print_partition_digests)
sub_parser = subparsers.add_parser(
'calculate_vbmeta_digest',
help='Calculate vbmeta digest.')
sub_parser.add_argument('--image',
help='Image to calculate digest for',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--hash_algorithm',
help='Hash algorithm to use (default: sha256)',
default='sha256')
sub_parser.add_argument('--output',
help='Write hex digest to file (default: stdout)',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.set_defaults(func=self.calculate_vbmeta_digest)
sub_parser = subparsers.add_parser(
'calculate_kernel_cmdline',
help='Calculate kernel cmdline.')
sub_parser.add_argument('--image',
help='Image to calculate kernel cmdline for',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--hashtree_disabled',
help='Return the cmdline for hashtree disabled',
action='store_true')
sub_parser.add_argument('--output',
help='Write cmdline to file (default: stdout)',
type=argparse.FileType('wt'),
default=sys.stdout)
sub_parser.set_defaults(func=self.calculate_kernel_cmdline)
sub_parser = subparsers.add_parser('set_ab_metadata',
help='Set A/B metadata.')
sub_parser.add_argument('--misc_image',
help=('The misc image to modify. If the image does '
'not exist, it will be created.'),
type=argparse.FileType('r+b'),
required=True)
sub_parser.add_argument('--slot_data',
help=('Slot data of the form "priority", '
'"tries_remaining", "sucessful_boot" for '
'slot A followed by the same for slot B, '
'separated by colons. The default value '
'is 15:7:0:14:7:0.'),
default='15:7:0:14:7:0')
sub_parser.set_defaults(func=self.set_ab_metadata)
sub_parser = subparsers.add_parser(
'make_atx_certificate',
help='Create an Android Things eXtension (ATX) certificate.')
sub_parser.add_argument('--output',
help='Write certificate to file',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.add_argument('--subject',
help=('Path to subject file'),
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--subject_key',
help=('Path to subject RSA public key file'),
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--subject_key_version',
help=('Version of the subject key'),
type=parse_number,
required=False)
sub_parser.add_argument('--subject_is_intermediate_authority',
help=('Generate an intermediate authority '
'certificate'),
action='store_true')
sub_parser.add_argument('--usage',
help=('Override usage with a hash of the provided '
'string'),
required=False)
sub_parser.add_argument('--authority_key',
help='Path to authority RSA private key file',
required=False)
sub_parser.add_argument('--signing_helper',
help='Path to helper used for signing',
metavar='APP',
default=None,
required=False)
sub_parser.add_argument('--signing_helper_with_files',
help='Path to helper used for signing using files',
metavar='APP',
default=None,
required=False)
sub_parser.set_defaults(func=self.make_atx_certificate)
sub_parser = subparsers.add_parser(
'make_atx_permanent_attributes',
help='Create Android Things eXtension (ATX) permanent attributes.')
sub_parser.add_argument('--output',
help='Write attributes to file',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.add_argument('--root_authority_key',
help='Path to authority RSA public key file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--product_id',
help=('Path to Product ID file'),
type=argparse.FileType('rb'),
required=True)
sub_parser.set_defaults(func=self.make_atx_permanent_attributes)
sub_parser = subparsers.add_parser(
'make_atx_metadata',
help='Create Android Things eXtension (ATX) metadata.')
sub_parser.add_argument('--output',
help='Write metadata to file',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.add_argument('--intermediate_key_certificate',
help='Path to intermediate key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--product_key_certificate',
help='Path to product key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.set_defaults(func=self.make_atx_metadata)
sub_parser = subparsers.add_parser(
'make_atx_unlock_credential',
help='Create an Android Things eXtension (ATX) unlock credential.')
sub_parser.add_argument('--output',
help='Write credential to file',
type=argparse.FileType('wb'),
default=sys.stdout)
sub_parser.add_argument('--intermediate_key_certificate',
help='Path to intermediate key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--unlock_key_certificate',
help='Path to unlock key certificate file',
type=argparse.FileType('rb'),
required=True)
sub_parser.add_argument('--challenge',
help='Path to the challenge to sign (optional). If '
'this is not provided the challenge signature '
'field is omitted and can be concatenated '
'later.',
required=False)
sub_parser.add_argument('--unlock_key',
help='Path to unlock key (optional). Must be '
'provided if using --challenge.',
required=False)
sub_parser.add_argument('--signing_helper',
help='Path to helper used for signing',
metavar='APP',
default=None,
required=False)
sub_parser.add_argument('--signing_helper_with_files',
help='Path to helper used for signing using files',
metavar='APP',
default=None,
required=False)
sub_parser.set_defaults(func=self.make_atx_unlock_credential)
args = parser.parse_args(argv[1:])
try:
args.func(args)
except AttributeError:
# This error gets raised when the command line tool is called without any
# arguments. It mimics the original Python 2 behavior.
parser.print_usage()
print('avbtool: error: too few arguments')
sys.exit(2)
except AvbError as e:
sys.stderr.write('{}: {}\n'.format(argv[0], str(e)))
sys.exit(1)
def version(self, _):
"""Implements the 'version' sub-command."""
print(get_release_string())
def generate_test_image(self, args):
"""Implements the 'generate_test_image' sub-command."""
self.avb.generate_test_image(args.output, args.image_size, args.start_byte)
def extract_public_key(self, args):
"""Implements the 'extract_public_key' sub-command."""
self.avb.extract_public_key(args.key, args.output)
def make_vbmeta_image(self, args):
"""Implements the 'make_vbmeta_image' sub-command."""
args = self._fixup_common_args(args)
self.avb.make_vbmeta_image(args.output, args.chain_partition,
args.algorithm, args.key,
args.public_key_metadata, args.rollback_index,
args.flags, args.rollback_index_location,
args.prop, args.prop_from_file,
args.kernel_cmdline,
args.setup_rootfs_from_kernel,
args.include_descriptors_from_image,
args.signing_helper,
args.signing_helper_with_files,
args.internal_release_string,
args.append_to_release_string,
args.print_required_libavb_version,
args.padding_size)
def append_vbmeta_image(self, args):
"""Implements the 'append_vbmeta_image' sub-command."""
self.avb.append_vbmeta_image(args.image.name, args.vbmeta_image.name,
args.partition_size)
def add_hash_footer(self, args):
"""Implements the 'add_hash_footer' sub-command."""
args = self._fixup_common_args(args)
self.avb.add_hash_footer(args.image.name if args.image else None,
args.partition_size,
args.partition_name, args.hash_algorithm,
args.salt, args.chain_partition, args.algorithm,
args.key,
args.public_key_metadata, args.rollback_index,
args.flags, args.rollback_index_location,
args.prop, args.prop_from_file,
args.kernel_cmdline,
args.setup_rootfs_from_kernel,
args.include_descriptors_from_image,
args.calc_max_image_size,
args.signing_helper,
args.signing_helper_with_files,
args.internal_release_string,
args.append_to_release_string,
args.output_vbmeta_image,
args.do_not_append_vbmeta_image,
args.print_required_libavb_version,
args.use_persistent_digest,
args.do_not_use_ab)
def add_hashtree_footer(self, args):
"""Implements the 'add_hashtree_footer' sub-command."""
args = self._fixup_common_args(args)
# TODO(zeuthen): Remove when removing support for the
# '--generate_fec' option above.
if args.generate_fec:
sys.stderr.write('The --generate_fec option is deprecated since FEC '
'is now generated by default. Use the option '
'--do_not_generate_fec to not generate FEC.\n')
self.avb.add_hashtree_footer(
args.image.name if args.image else None,
args.partition_size,
args.partition_name,
not args.do_not_generate_fec, args.fec_num_roots,
args.hash_algorithm, args.block_size,
args.salt, args.chain_partition, args.algorithm,
args.key, args.public_key_metadata,
args.rollback_index, args.flags,
args.rollback_index_location, args.prop,
args.prop_from_file,
args.kernel_cmdline,
args.setup_rootfs_from_kernel,
args.setup_as_rootfs_from_kernel,
args.include_descriptors_from_image,
args.calc_max_image_size,
args.signing_helper,
args.signing_helper_with_files,
args.internal_release_string,
args.append_to_release_string,
args.output_vbmeta_image,
args.do_not_append_vbmeta_image,
args.print_required_libavb_version,
args.use_persistent_digest,
args.do_not_use_ab,
args.no_hashtree)
def erase_footer(self, args):
"""Implements the 'erase_footer' sub-command."""
self.avb.erase_footer(args.image.name, args.keep_hashtree)
def zero_hashtree(self, args):
"""Implements the 'zero_hashtree' sub-command."""
self.avb.zero_hashtree(args.image.name)
def extract_vbmeta_image(self, args):
"""Implements the 'extract_vbmeta_image' sub-command."""
self.avb.extract_vbmeta_image(args.output, args.image.name,
args.padding_size)
def resize_image(self, args):
"""Implements the 'resize_image' sub-command."""
self.avb.resize_image(args.image.name, args.partition_size)
def set_ab_metadata(self, args):
"""Implements the 'set_ab_metadata' sub-command."""
self.avb.set_ab_metadata(args.misc_image, args.slot_data)
def info_image(self, args):
"""Implements the 'info_image' sub-command."""
self.avb.info_image(args.image.name, args.output)
def verify_image(self, args):
"""Implements the 'verify_image' sub-command."""
self.avb.verify_image(args.image.name, args.key,
args.expected_chain_partition,
args.follow_chain_partitions,
args.accept_zeroed_hashtree)
def print_partition_digests(self, args):
"""Implements the 'print_partition_digests' sub-command."""
self.avb.print_partition_digests(args.image.name, args.output, args.json)
def calculate_vbmeta_digest(self, args):
"""Implements the 'calculate_vbmeta_digest' sub-command."""
self.avb.calculate_vbmeta_digest(args.image.name, args.hash_algorithm,
args.output)
def calculate_kernel_cmdline(self, args):
"""Implements the 'calculate_kernel_cmdline' sub-command."""
self.avb.calculate_kernel_cmdline(args.image.name, args.hashtree_disabled,
args.output)
def make_atx_certificate(self, args):
"""Implements the 'make_atx_certificate' sub-command."""
self.avb.make_atx_certificate(args.output, args.authority_key,
args.subject_key.name,
args.subject_key_version,
args.subject.read(),
args.subject_is_intermediate_authority,
args.usage,
args.signing_helper,
args.signing_helper_with_files)
def make_atx_permanent_attributes(self, args):
"""Implements the 'make_atx_permanent_attributes' sub-command."""
self.avb.make_atx_permanent_attributes(args.output,
args.root_authority_key.name,
args.product_id.read())
def make_atx_metadata(self, args):
"""Implements the 'make_atx_metadata' sub-command."""
self.avb.make_atx_metadata(args.output,
args.intermediate_key_certificate.read(),
args.product_key_certificate.read())
def make_atx_unlock_credential(self, args):
"""Implements the 'make_atx_unlock_credential' sub-command."""
self.avb.make_atx_unlock_credential(
args.output,
args.intermediate_key_certificate.read(),
args.unlock_key_certificate.read(),
args.challenge,
args.unlock_key,
args.signing_helper,
args.signing_helper_with_files)
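# Illustrative invocation of the ATX sub-commands wired up above; the file
# names are placeholders, not part of this tool:
#
#   avbtool make_atx_metadata --output metadata.bin \
#       --intermediate_key_certificate intermediate.bin \
#       --product_key_certificate product.bin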
if __name__ == '__main__':
if AVB_INVOCATION_LOGFILE:
with open(AVB_INVOCATION_LOGFILE, 'a') as log:
log.write(' '.join(sys.argv))
log.write('\n')
tool = AvbTool()
tool.run(sys.argv)
|
py | 7dfbbc456a7ae371569822e0c6458f9b1e921273 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 18 10:34:30 2016
@author: dahoiv
"""
import os
import sqlite3
import ConvertDataToDB
from img_data import img_data
import image_registration
import util
def save_to_db(image_ids, ny_image_ids):
print("----here")
data_transforms = []
for (img_id, ny_img_id) in zip(image_ids, ny_image_ids):
print(img_id)
_img = img_data(img_id, db_path, util.TEMP_FOLDER_PATH)
_img.load_db_transforms()
print(_img.transform)
if _img.transform is None:
continue
_img.processed_filepath = image_registration.move_vol(_img.img_filepath, _img.get_transforms())
_img.image_id = ny_img_id
data_transforms.append(_img)
image_registration.save_transform_to_database(data_transforms)
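# Illustrative call only (the ids are placeholders): save_to_db() expects two
# parallel lists, the image ids in the old database and the matching ids in
# the new one, e.g.
#
#   save_to_db([12, 13], [112, 113])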
if __name__ == "__main__":
os.nice(19)
util.setup("temp_convert/", "LGG")
util.mkdir_p(util.TEMP_FOLDER_PATH)
util.DATA_FOLDER = "/mnt/dokumneter/data/database/"
if True:
db_path = "/home/dahoiv/disk/data/database3/LGG/"
util.DATA_FOLDER = util.DATA_FOLDER + "LGG" + "/"
util.DB_PATH = util.DATA_FOLDER + "brainSegmentation.db"
convert_table_inv = ConvertDataToDB.get_convert_table('/home/dahoiv/disk/data/Segmentations/NY_PID_LGG segmentert.xlsx')
convert_table = {v: k for k, v in convert_table_inv.items()}
print(convert_table)
print(util.DB_PATH)
conn = sqlite3.connect(util.DB_PATH)
conn.text_factory = str
cursor = conn.execute('''SELECT pid from Patient''')
conn2 = sqlite3.connect(db_path + "brainSegmentation.db")
conn2.text_factory = str
image_ids = []
ny_image_ids = []
for row in cursor:
# print(row)
ny_pid = row[0]
try:
old_pid = int(convert_table_inv[str(ny_pid)])
except Exception:
continue
cursor2 = conn2.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''', (old_pid, "pre"))
for _id in cursor2:
image_ids.append(_id[0])
cursor2.close()
cursor2 = conn.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''', (ny_pid, "pre"))
for _id in cursor2:
ny_image_ids.append(_id[0])
cursor2.close()
cursor.close()
conn.close()
print(ny_image_ids, image_ids)
save_to_db(image_ids, ny_image_ids)
if False:
util.setup("temp_convert/", "GBM")
db_path = "/home/dahoiv/disk/data/database/GBM/"
util.DATA_FOLDER = util.DATA_FOLDER + "GBM" + "/"
util.DB_PATH = util.DATA_FOLDER + "brainSegmentation.db"
import do_img_registration_GBM
image_ids = do_img_registration_GBM.find_images()
ny_image_ids = image_ids
save_to_db(image_ids, ny_image_ids)
|
py | 7dfbbce3bd514a1fdbfa7fa6811e8bd3cc071ed6 | from bisect import bisect_left
from bisect import bisect_right
from contextlib import contextmanager
from copy import deepcopy
from functools import wraps
from inspect import isclass
import calendar
import collections
import collections.abc
import datetime
import decimal
import hashlib
import itertools
import logging
import operator
import re
import socket
import struct
import sys
import threading
import time
import uuid
import warnings
try:
from pysqlite3 import dbapi2 as pysq3
except ImportError:
try:
from pysqlite2 import dbapi2 as pysq3
except ImportError:
pysq3 = None
try:
import sqlite3
except ImportError:
sqlite3 = pysq3
else:
if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info:
sqlite3 = pysq3
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import psycopg2
from psycopg2 import extensions as pg_extensions
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql # prefer the C module.
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
__version__ = '3.1.5'
__all__ = [
'AsIs',
'AutoField',
'BareField',
'BigAutoField',
'BigBitField',
'BigIntegerField',
'BitField',
'BlobField',
'BooleanField',
'Case',
'Cast',
'CharField',
'Check',
'Column',
'CompositeKey',
'Context',
'Database',
'DatabaseError',
'DataError',
'DateField',
'DateTimeField',
'DecimalField',
'DeferredForeignKey',
'DeferredThroughModel',
'DJANGO_MAP',
'DoesNotExist',
'DoubleField',
'DQ',
'Field',
'FixedCharField',
'FloatField',
'fn',
'ForeignKeyField',
'ImproperlyConfigured',
'Index',
'IntegerField',
'IntegrityError',
'InterfaceError',
'InternalError',
'IPField',
'JOIN',
'ManyToManyField',
'Model',
'ModelIndex',
'MySQLDatabase',
'NotSupportedError',
'OP',
'OperationalError',
'PostgresqlDatabase',
'PrimaryKeyField', # XXX: Deprecated, change to AutoField.
'prefetch',
'ProgrammingError',
'Proxy',
'SchemaManager',
'SmallIntegerField',
'Select',
'SQL',
'SqliteDatabase',
'Table',
'TextField',
'TimeField',
'TimestampField',
'Tuple',
'UUIDField',
'Value',
'Window',
]
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('peewee')
logger.addHandler(NullHandler())
# Import any speedups or provide alternate implementations.
try:
from playhouse._speedups import quote
except ImportError:
def quote(path, quote_char):
quotes = (quote_char, quote_char)
if len(path) == 1:
return path[0].join(quotes)
return '.'.join([part.join(quotes) for part in path])
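# Illustrative only, not part of the original module: with the fallback
# implementation above,
#
#   quote(('users', 'id'), '"')  # -> '"users"."id"'
#   quote(('users',), '"')       # -> '"users"'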
if sys.version_info[0] == 2:
text_type = unicode
bytes_type = str
buffer_type = buffer
izip_longest = itertools.izip_longest
exec('def reraise(tp, value, tb=None): raise tp, value, tb')
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
else:
import builtins
    from collections.abc import Callable  # moved out of collections in Python 3.3+
from functools import reduce
callable = lambda c: isinstance(c, Callable)
text_type = str
bytes_type = bytes
buffer_type = memoryview
basestring = str
long = int
print_ = getattr(builtins, 'print')
izip_longest = itertools.zip_longest
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
__date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second'))
# Sqlite does not support the `date_part` SQL function, so we will define an
# implementation in python.
__sqlite_datetime_formats__ = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
__sqlite_date_trunc__ = {
'year': '%Y',
'month': '%Y-%m',
'day': '%Y-%m-%d',
'hour': '%Y-%m-%d %H',
'minute': '%Y-%m-%d %H:%M',
'second': '%Y-%m-%d %H:%M:%S'}
__mysql_date_trunc__ = __sqlite_date_trunc__.copy()
__mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i'
__mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S'
def _sqlite_date_part(lookup_type, datetime_string):
assert lookup_type in __date_parts__
if not datetime_string:
return
dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, datetime_string):
assert lookup_type in __sqlite_date_trunc__
if not datetime_string:
return
dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
return dt.strftime(__sqlite_date_trunc__[lookup_type])
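# Illustrative only (assumes format_date_time(), defined later in this module,
# can parse the string with one of __sqlite_datetime_formats__):
#
#   _sqlite_date_part('year', '2018-03-01 12:30:00')    # -> 2018
#   _sqlite_date_trunc('month', '2018-03-01 12:30:00')  # -> '2018-03'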
def __deprecated__(s):
warnings.warn(s, DeprecationWarning)
class attrdict(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value): self[attr] = value
def __iadd__(self, rhs): self.update(rhs); return self
def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d
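# Illustrative only: attrdict exposes keys as attributes and supports merging,
# e.g.
#
#   d = attrdict(eq='=')
#   d.eq                       # -> '='
#   (d + {'ne': '!='})['ne']   # -> '!='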
SENTINEL = object()
#: Operations for use in SQL expressions.
OP = attrdict(
AND='AND',
OR='OR',
ADD='+',
SUB='-',
MUL='*',
DIV='/',
BIN_AND='&',
BIN_OR='|',
XOR='#',
MOD='%',
EQ='=',
LT='<',
LTE='<=',
GT='>',
GTE='>=',
NE='!=',
IN='IN',
NOT_IN='NOT IN',
IS='IS',
IS_NOT='IS NOT',
LIKE='LIKE',
ILIKE='ILIKE',
BETWEEN='BETWEEN',
REGEXP='REGEXP',
CONCAT='||',
BITWISE_NEGATION='~')
# To support "django-style" double-underscore filters, create a mapping between
# operation name and operation code, e.g. "__eq" == OP.EQ.
DJANGO_MAP = attrdict({
'eq': OP.EQ,
'lt': OP.LT,
'lte': OP.LTE,
'gt': OP.GT,
'gte': OP.GTE,
'ne': OP.NE,
'in': OP.IN,
'is': OP.IS,
'like': OP.LIKE,
'ilike': OP.ILIKE,
'regexp': OP.REGEXP})
#: Mapping of field type to the data-type supported by the database. Databases
#: may override or add to this list.
FIELD = attrdict(
AUTO='INTEGER',
BIGAUTO='BIGINT',
BIGINT='BIGINT',
BLOB='BLOB',
BOOL='SMALLINT',
CHAR='CHAR',
DATE='DATE',
DATETIME='DATETIME',
DECIMAL='DECIMAL',
DEFAULT='',
DOUBLE='REAL',
FLOAT='REAL',
INT='INTEGER',
SMALLINT='SMALLINT',
TEXT='TEXT',
TIME='TIME',
UUID='TEXT',
VARCHAR='VARCHAR')
#: Join helpers (for convenience) -- all join types are supported, this object
#: is just to help avoid introducing errors by using strings everywhere.
JOIN = attrdict(
INNER='INNER',
LEFT_OUTER='LEFT OUTER',
RIGHT_OUTER='RIGHT OUTER',
FULL='FULL',
FULL_OUTER='FULL OUTER',
CROSS='CROSS')
# Row representations.
ROW = attrdict(
TUPLE=1,
DICT=2,
NAMED_TUPLE=3,
CONSTRUCTOR=4,
MODEL=5)
SCOPE_NORMAL = 1
SCOPE_SOURCE = 2
SCOPE_VALUES = 4
SCOPE_CTE = 8
SCOPE_COLUMN = 16
# Helper functions that are used in various parts of the codebase.
MODEL_BASE = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(MODEL_BASE, (base,), {})
def merge_dict(source, overrides):
merged = source.copy()
if overrides:
merged.update(overrides)
return merged
if sys.version_info[:2] == (2, 6):
import types
def is_model(obj):
if isinstance(obj, (type, types.ClassType)):
return issubclass(obj, Model)
return False
else:
def is_model(obj):
if isclass(obj):
return issubclass(obj, Model)
return False
def ensure_tuple(value):
if value is not None:
return value if isinstance(value, (list, tuple)) else (value,)
def ensure_entity(value):
if value is not None:
return value if isinstance(value, Node) else Entity(value)
def chunked(it, n):
marker = object()
for group in (list(g) for g in izip_longest(*[iter(it)] * n,
fillvalue=marker)):
if group[-1] is marker:
del group[group.index(marker):]
yield group
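# Illustrative only: chunked() yields fixed-size groups, with a short final
# group when the iterable does not divide evenly, e.g.
#
#   list(chunked([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]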
class _callable_context_manager(object):
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
class Proxy(object):
"""
Create a proxy or placeholder for another object.
"""
__slots__ = ('obj', '_callbacks')
def __init__(self):
self._callbacks = []
self.initialize(None)
def initialize(self, obj):
self.obj = obj
for callback in self._callbacks:
callback(obj)
def attach_callback(self, callback):
self._callbacks.append(callback)
return callback
def __getattr__(self, attr):
if self.obj is None:
raise AttributeError('Cannot use uninitialized Proxy.')
return getattr(self.obj, attr)
def __setattr__(self, attr, value):
if attr not in self.__slots__:
raise AttributeError('Cannot set attribute on proxy.')
return super(Proxy, self).__setattr__(attr, value)
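# Illustrative (hypothetical) usage of Proxy as a deferred database
# placeholder; SqliteDatabase is defined later in this module:
#
#   database_proxy = Proxy()
#   ...
#   database_proxy.initialize(SqliteDatabase(':memory:'))
#   database_proxy.execute_sql('select 1')  # delegates to the real database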
# SQL Generation.
class AliasManager(object):
def __init__(self):
# A list of dictionaries containing mappings at various depths.
self._counter = 0
self._current_index = 0
self._mapping = []
self.push()
@property
def mapping(self):
return self._mapping[self._current_index - 1]
def add(self, source):
if source not in self.mapping:
self._counter += 1
self[source] = 't%d' % self._counter
return self.mapping[source]
def get(self, source, any_depth=False):
if any_depth:
for idx in reversed(range(self._current_index)):
if source in self._mapping[idx]:
return self._mapping[idx][source]
return self.add(source)
def __getitem__(self, source):
return self.get(source)
def __setitem__(self, source, alias):
self.mapping[source] = alias
def push(self):
self._current_index += 1
if self._current_index > len(self._mapping):
self._mapping.append({})
def pop(self):
if self._current_index == 1:
raise ValueError('Cannot pop() from empty alias manager.')
self._current_index -= 1
class State(collections.namedtuple('_State', ('scope', 'parentheses',
'subquery', 'settings'))):
def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, subquery=False,
**kwargs):
return super(State, cls).__new__(cls, scope, parentheses, subquery,
kwargs)
def __call__(self, scope=None, parentheses=None, subquery=None, **kwargs):
# All state is "inherited" except parentheses.
scope = self.scope if scope is None else scope
subquery = self.subquery if subquery is None else subquery
# Try to avoid unnecessary dict copying.
if kwargs and self.settings:
settings = self.settings.copy() # Copy original settings dict.
settings.update(kwargs) # Update copy with overrides.
elif kwargs:
settings = kwargs
else:
settings = self.settings
return State(scope, parentheses, subquery, **settings)
def __getattr__(self, attr_name):
return self.settings.get(attr_name)
def __scope_context__(scope):
@contextmanager
def inner(self, **kwargs):
with self(scope=scope, **kwargs):
yield self
return inner
class Context(object):
def __init__(self, **settings):
self.stack = []
self._sql = []
self._values = []
self.alias_manager = AliasManager()
self.state = State(**settings)
def column_sort_key(self, item):
return item[0].get_sort_key(self)
@property
def scope(self):
return self.state.scope
@property
def parentheses(self):
return self.state.parentheses
@property
def subquery(self):
return self.state.subquery
def __call__(self, **overrides):
if overrides and overrides.get('scope') == self.scope:
del overrides['scope']
self.stack.append(self.state)
self.state = self.state(**overrides)
return self
scope_normal = __scope_context__(SCOPE_NORMAL)
scope_source = __scope_context__(SCOPE_SOURCE)
scope_values = __scope_context__(SCOPE_VALUES)
scope_cte = __scope_context__(SCOPE_CTE)
scope_column = __scope_context__(SCOPE_COLUMN)
def __enter__(self):
if self.parentheses:
self.literal('(')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.parentheses:
self.literal(')')
self.state = self.stack.pop()
@contextmanager
def push_alias(self):
self.alias_manager.push()
yield
self.alias_manager.pop()
def sql(self, obj):
if isinstance(obj, (Node, Context)):
return obj.__sql__(self)
elif is_model(obj):
return obj._meta.table.__sql__(self)
else:
return self.sql(Value(obj))
def literal(self, keyword):
self._sql.append(keyword)
return self
def value(self, value, converter=None):
if converter is None:
converter = self.state.converter
if converter is not None:
value = converter(value)
self._values.append(value)
return self
def __sql__(self, ctx):
ctx._sql.extend(self._sql)
ctx._values.extend(self._values)
return ctx
def parse(self, node):
return self.sql(node).query()
def query(self):
return ''.join(self._sql), self._values
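# Illustrative only: Context accumulates SQL fragments and parameters, which
# query() then returns as a (sql, params) pair, e.g.
#
#   Context().literal('SELECT ').literal('?').value(1).query()
#   # -> ('SELECT ?', [1])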
# AST.
class Node(object):
def clone(self):
obj = self.__class__.__new__(self.__class__)
obj.__dict__ = self.__dict__.copy()
return obj
def __sql__(self, ctx):
raise NotImplementedError
@staticmethod
def copy(method):
def inner(self, *args, **kwargs):
clone = self.clone()
method(clone, *args, **kwargs)
return clone
return inner
def unwrap(self):
return self
class ColumnFactory(object):
__slots__ = ('node',)
def __init__(self, node):
self.node = node
def __getattr__(self, attr):
return Column(self.node, attr)
class _DynamicColumn(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return ColumnFactory(instance) # Implements __getattr__().
return self
class _ExplicitColumn(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
raise AttributeError(
'%s specifies columns explicitly, and does not support '
'dynamic column lookups.' % instance)
return self
class Source(Node):
c = _DynamicColumn()
def __init__(self, alias=None):
super(Source, self).__init__()
self._alias = alias
@Node.copy
def alias(self, name):
self._alias = name
def select(self, *columns):
return Select((self,), columns)
def join(self, dest, join_type='INNER', on=None):
return Join(self, dest, join_type, on)
def left_outer_join(self, dest, on=None):
return Join(self, dest, JOIN.LEFT_OUTER, on)
def get_sort_key(self, ctx):
if self._alias:
return (self._alias,)
return (ctx.alias_manager[self],)
def apply_alias(self, ctx):
# If we are defining the source, include the "AS alias" declaration. An
# alias is created for the source if one is not already defined.
if ctx.scope == SCOPE_SOURCE:
if self._alias:
ctx.alias_manager[self] = self._alias
ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self]))
return ctx
def apply_column(self, ctx):
if self._alias:
ctx.alias_manager[self] = self._alias
return ctx.sql(Entity(ctx.alias_manager[self]))
class _HashableSource(object):
def __init__(self, *args, **kwargs):
super(_HashableSource, self).__init__(*args, **kwargs)
self._update_hash()
@Node.copy
def alias(self, name):
self._alias = name
self._update_hash()
def _update_hash(self):
self._hash = self._get_hash()
def _get_hash(self):
return hash((self.__class__, self._path, self._alias))
def __hash__(self):
return self._hash
def __eq__(self, other):
return self._hash == other._hash
def __ne__(self, other):
return not (self == other)
def __bind_database__(meth):
@wraps(meth)
def inner(self, *args, **kwargs):
result = meth(self, *args, **kwargs)
if self._database:
return result.bind(self._database)
return result
return inner
def __join__(join_type='INNER', inverted=False):
def method(self, other):
if inverted:
self, other = other, self
return Join(self, other, join_type=join_type)
return method
class BaseTable(Source):
__and__ = __join__(JOIN.INNER)
__add__ = __join__(JOIN.LEFT_OUTER)
__sub__ = __join__(JOIN.RIGHT_OUTER)
__or__ = __join__(JOIN.FULL_OUTER)
__mul__ = __join__(JOIN.CROSS)
__rand__ = __join__(JOIN.INNER, inverted=True)
__radd__ = __join__(JOIN.LEFT_OUTER, inverted=True)
__rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True)
__ror__ = __join__(JOIN.FULL_OUTER, inverted=True)
__rmul__ = __join__(JOIN.CROSS, inverted=True)
class _BoundTableContext(_callable_context_manager):
def __init__(self, table, database):
self.table = table
self.database = database
def __enter__(self):
self._orig_database = self.table._database
self.table.bind(self.database)
if self.table._model is not None:
self.table._model.bind(self.database)
return self.table
def __exit__(self, exc_type, exc_val, exc_tb):
self.table.bind(self._orig_database)
if self.table._model is not None:
self.table._model.bind(self._orig_database)
class Table(_HashableSource, BaseTable):
def __init__(self, name, columns=None, primary_key=None, schema=None,
alias=None, _model=None, _database=None):
self.__name__ = name
self._columns = columns
self._primary_key = primary_key
self._schema = schema
self._path = (schema, name) if schema else (name,)
self._model = _model
self._database = _database
super(Table, self).__init__(alias=alias)
# Allow tables to restrict what columns are available.
if columns is not None:
self.c = _ExplicitColumn()
for column in columns:
setattr(self, column, Column(self, column))
if primary_key:
col_src = self if self._columns else self.c
self.primary_key = getattr(col_src, primary_key)
else:
self.primary_key = None
def clone(self):
# Ensure a deep copy of the column instances.
return Table(
self.__name__,
columns=self._columns,
primary_key=self._primary_key,
schema=self._schema,
alias=self._alias,
_model=self._model,
_database=self._database)
def bind(self, database=None):
self._database = database
return self
def bind_ctx(self, database=None):
return _BoundTableContext(self, database)
def _get_hash(self):
return hash((self.__class__, self._path, self._alias, self._model))
@__bind_database__
def select(self, *columns):
if not columns and self._columns:
columns = [Column(self, column) for column in self._columns]
return Select((self,), columns)
@__bind_database__
def insert(self, insert=None, columns=None, **kwargs):
if kwargs:
insert = {} if insert is None else insert
src = self if self._columns else self.c
for key, value in kwargs.items():
insert[getattr(src, key)] = value
return Insert(self, insert=insert, columns=columns)
@__bind_database__
def replace(self, insert=None, columns=None, **kwargs):
return (self
.insert(insert=insert, columns=columns)
.on_conflict('REPLACE'))
@__bind_database__
def update(self, update=None, **kwargs):
if kwargs:
update = {} if update is None else update
for key, value in kwargs.items():
src = self if self._columns else self.c
                update[getattr(src, key)] = value
return Update(self, update=update)
@__bind_database__
def delete(self):
return Delete(self)
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
# Return the quoted table name.
return ctx.sql(Entity(*self._path))
if self._alias:
ctx.alias_manager[self] = self._alias
if ctx.scope == SCOPE_SOURCE:
# Define the table and its alias.
return self.apply_alias(ctx.sql(Entity(*self._path)))
else:
# Refer to the table using the alias.
return self.apply_column(ctx)
class Join(BaseTable):
def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None):
super(Join, self).__init__(alias=alias)
self.lhs = lhs
self.rhs = rhs
self.join_type = join_type
self._on = on
def on(self, predicate):
self._on = predicate
return self
def __sql__(self, ctx):
(ctx
.sql(self.lhs)
.literal(' %s JOIN ' % self.join_type)
.sql(self.rhs))
if self._on is not None:
ctx.literal(' ON ').sql(self._on)
return ctx
class CTE(_HashableSource, Source):
def __init__(self, name, query, recursive=False, columns=None):
self._alias = name
self._nested_cte_list = query._cte_list
query._cte_list = ()
self._query = query
self._recursive = recursive
if columns is not None:
columns = [Entity(c) if isinstance(c, basestring) else c
for c in columns]
self._columns = columns
super(CTE, self).__init__(alias=name)
def _get_hash(self):
return hash((self.__class__, self._alias, id(self._query)))
def __sql__(self, ctx):
if ctx.scope != SCOPE_CTE:
return ctx.sql(Entity(self._alias))
with ctx.push_alias():
ctx.alias_manager[self] = self._alias
ctx.sql(Entity(self._alias))
if self._columns:
ctx.literal(' ').sql(EnclosedNodeList(self._columns))
ctx.literal(' AS (')
with ctx.scope_normal():
ctx.sql(self._query)
ctx.literal(')')
return ctx
class ColumnBase(Node):
def alias(self, alias):
if alias:
return Alias(self, alias)
return self
def unalias(self):
return self
def cast(self, as_type):
return Cast(self, as_type)
def asc(self, collation=None, nulls=None):
return Asc(self, collation=collation, nulls=nulls)
__pos__ = asc
def desc(self, collation=None, nulls=None):
return Desc(self, collation=collation, nulls=nulls)
__neg__ = desc
def __invert__(self):
return Negated(self)
def _e(op, inv=False):
"""
Lightweight factory which returns a method that builds an Expression
consisting of the left-hand and right-hand operands, using `op`.
"""
def inner(self, rhs):
if inv:
return Expression(rhs, op, self)
return Expression(self, op, rhs)
return inner
__and__ = _e(OP.AND)
__or__ = _e(OP.OR)
__add__ = _e(OP.ADD)
__sub__ = _e(OP.SUB)
__mul__ = _e(OP.MUL)
__div__ = __truediv__ = _e(OP.DIV)
__xor__ = _e(OP.XOR)
__radd__ = _e(OP.ADD, inv=True)
__rsub__ = _e(OP.SUB, inv=True)
__rmul__ = _e(OP.MUL, inv=True)
__rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True)
__rand__ = _e(OP.AND, inv=True)
__ror__ = _e(OP.OR, inv=True)
__rxor__ = _e(OP.XOR, inv=True)
def __eq__(self, rhs):
op = OP.IS if rhs is None else OP.EQ
return Expression(self, op, rhs)
def __ne__(self, rhs):
op = OP.IS_NOT if rhs is None else OP.NE
return Expression(self, op, rhs)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lshift__ = _e(OP.IN)
__rshift__ = _e(OP.IS)
__mod__ = _e(OP.LIKE)
__pow__ = _e(OP.ILIKE)
bin_and = _e(OP.BIN_AND)
bin_or = _e(OP.BIN_OR)
in_ = _e(OP.IN)
not_in = _e(OP.NOT_IN)
regexp = _e(OP.REGEXP)
# Special expressions.
def is_null(self, is_null=True):
op = OP.IS if is_null else OP.IS_NOT
return Expression(self, op, None)
def contains(self, rhs):
return Expression(self, OP.ILIKE, '%%%s%%' % rhs)
def startswith(self, rhs):
return Expression(self, OP.ILIKE, '%s%%' % rhs)
def endswith(self, rhs):
return Expression(self, OP.ILIKE, '%%%s' % rhs)
def between(self, lo, hi):
return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi)))
def concat(self, rhs):
return StringExpression(self, OP.CONCAT, rhs)
def __getitem__(self, item):
if isinstance(item, slice):
if item.start is None or item.stop is None:
raise ValueError('BETWEEN range must have both a start- and '
'end-point.')
return self.between(item.start, item.stop)
return self == item
def distinct(self):
return NodeList((SQL('DISTINCT'), self))
def get_sort_key(self, ctx):
return ()
class Column(ColumnBase):
def __init__(self, source, name):
self.source = source
self.name = name
def get_sort_key(self, ctx):
if ctx.scope == SCOPE_VALUES:
return (self.name,)
else:
return self.source.get_sort_key(ctx) + (self.name,)
def __hash__(self):
return hash((self.source, self.name))
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
return ctx.sql(Entity(self.name))
else:
with ctx.scope_column():
return ctx.sql(self.source).literal('.').sql(Entity(self.name))
class WrappedNode(ColumnBase):
def __init__(self, node):
self.node = node
def unwrap(self):
return self.node.unwrap()
class EntityFactory(object):
__slots__ = ('node',)
def __init__(self, node):
self.node = node
def __getattr__(self, attr):
return Entity(self.node, attr)
class _DynamicEntity(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return EntityFactory(instance._alias) # Implements __getattr__().
return self
class Alias(WrappedNode):
c = _DynamicEntity()
def __init__(self, node, alias):
super(Alias, self).__init__(node)
self._alias = alias
def alias(self, alias=None):
if alias is None:
return self.node
else:
return Alias(self.node, alias)
def unalias(self):
return self.node
def __sql__(self, ctx):
if ctx.scope == SCOPE_SOURCE:
return (ctx
.sql(self.node)
.literal(' AS ')
.sql(Entity(self._alias)))
else:
return ctx.sql(Entity(self._alias))
class Negated(WrappedNode):
def __invert__(self):
return self.node
def __sql__(self, ctx):
return ctx.literal('NOT ').sql(self.node)
class BitwiseMixin(object):
def __and__(self, other):
return self.bin_and(other)
def __or__(self, other):
return self.bin_or(other)
def __sub__(self, other):
return self.bin_and(other.bin_negated())
def __invert__(self):
return BitwiseNegated(self)
class BitwiseNegated(BitwiseMixin, WrappedNode):
def __invert__(self):
return self.node
def __sql__(self, ctx):
if ctx.state.operations:
op_sql = ctx.state.operations.get(self.op, self.op)
else:
op_sql = self.op
return ctx.literal(op_sql).sql(self.node)
class Value(ColumnBase):
def __init__(self, value, converter=None, unpack=True):
self.value = value
self.converter = converter
self.multi = isinstance(self.value, (list, set, tuple)) and unpack
if self.multi:
self.values = []
for item in self.value:
if isinstance(item, Node):
self.values.append(item)
else:
self.values.append(Value(item, self.converter))
def __sql__(self, ctx):
if self.multi:
ctx.sql(EnclosedNodeList(self.values))
else:
(ctx
.literal(ctx.state.param or '?')
.value(self.value, self.converter))
return ctx
def AsIs(value):
return Value(value, unpack=False)
class Cast(WrappedNode):
def __init__(self, node, cast):
super(Cast, self).__init__(node)
self.cast = cast
def __sql__(self, ctx):
return (ctx
.literal('CAST(')
.sql(self.node)
.literal(' AS %s)' % self.cast))
class Ordering(WrappedNode):
def __init__(self, node, direction, collation=None, nulls=None):
super(Ordering, self).__init__(node)
self.direction = direction
self.collation = collation
self.nulls = nulls
def collate(self, collation=None):
return Ordering(self.node, self.direction, collation)
def __sql__(self, ctx):
ctx.sql(self.node).literal(' %s' % self.direction)
if self.collation:
ctx.literal(' COLLATE %s' % self.collation)
if self.nulls:
ctx.literal(' NULLS %s' % self.nulls)
return ctx
def Asc(node, collation=None, nulls=None):
return Ordering(node, 'ASC', collation, nulls)
def Desc(node, collation=None, nulls=None):
return Ordering(node, 'DESC', collation, nulls)
class Expression(ColumnBase):
def __init__(self, lhs, op, rhs, flat=False):
self.lhs = lhs
self.op = op
self.rhs = rhs
self.flat = flat
def __sql__(self, ctx):
overrides = {'parentheses': not self.flat}
if isinstance(self.lhs, Field):
overrides['converter'] = self.lhs.db_value
else:
overrides['converter'] = None
if ctx.state.operations:
op_sql = ctx.state.operations.get(self.op, self.op)
else:
op_sql = self.op
with ctx(**overrides):
# Postgresql reports an error for IN/NOT IN (), so convert to
# the equivalent boolean expression.
if self.op == OP.IN and Context().parse(self.rhs)[0] == '()':
return ctx.literal('0 = 1')
elif self.op == OP.NOT_IN and Context().parse(self.rhs)[0] == '()':
return ctx.literal('1 = 1')
return (ctx
.sql(self.lhs)
.literal(' %s ' % op_sql)
.sql(self.rhs))
class StringExpression(Expression):
def __add__(self, rhs):
return self.concat(rhs)
def __radd__(self, lhs):
return StringExpression(lhs, OP.CONCAT, self)
class Entity(ColumnBase):
def __init__(self, *path):
self._path = [part.replace('"', '""') for part in path if part]
def __getattr__(self, attr):
return Entity(*self._path + [attr])
def get_sort_key(self, ctx):
return tuple(self._path)
def __hash__(self):
return hash((self.__class__.__name__, tuple(self._path)))
def __sql__(self, ctx):
return ctx.literal(quote(self._path, ctx.state.quote or '"'))
class SQL(ColumnBase):
def __init__(self, sql, params=None):
self.sql = sql
self.params = params
def __sql__(self, ctx):
ctx.literal(self.sql)
if self.params:
for param in self.params:
if isinstance(param, Node):
ctx.sql(param)
else:
ctx.value(param)
return ctx
def Check(constraint):
return SQL('CHECK (%s)' % constraint)
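# Illustrative only: Check() simply wraps a constraint string in SQL, e.g.
#
#   Context().parse(Check('price > 0'))  # -> ('CHECK (price > 0)', [])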
class Function(ColumnBase):
def __init__(self, name, arguments, coerce=True):
self.name = name
self.arguments = arguments
if name and name.lower() in ('sum', 'count'):
self._coerce = False
else:
self._coerce = coerce
def __getattr__(self, attr):
def decorator(*args, **kwargs):
return Function(attr, args, **kwargs)
return decorator
def over(self, partition_by=None, order_by=None, start=None, end=None,
window=None):
if isinstance(partition_by, Window) and window is None:
window = partition_by
if start is not None and not isinstance(start, SQL):
start = SQL(*start)
if end is not None and not isinstance(end, SQL):
end = SQL(*end)
if window is None:
node = Window(partition_by=partition_by, order_by=order_by,
start=start, end=end)
else:
node = SQL(window._alias)
return NodeList((self, SQL('OVER'), node))
def coerce(self, coerce=True):
self._coerce = coerce
return self
def __sql__(self, ctx):
ctx.literal(self.name)
if not len(self.arguments):
ctx.literal('()')
else:
# Special-case to avoid double-wrapping functions whose only
# argument is a sub-query.
if len(self.arguments) == 1 and isinstance(self.arguments[0],
SelectQuery):
wrapper = CommaNodeList
else:
wrapper = EnclosedNodeList
ctx.sql(wrapper([
(argument if isinstance(argument, Node)
else Value(argument))
for argument in self.arguments]))
return ctx
fn = Function(None, None)
class Window(Node):
CURRENT_ROW = 'CURRENT ROW'
def __init__(self, partition_by=None, order_by=None, start=None, end=None,
alias=None):
super(Window, self).__init__()
self.partition_by = partition_by
self.order_by = order_by
self.start = start
self.end = end
if self.start is None and self.end is not None:
raise ValueError('Cannot specify WINDOW end without start.')
self._alias = alias or 'w'
def alias(self, alias=None):
self._alias = alias or 'w'
return self
@staticmethod
def following(value=None):
if value is None:
return SQL('UNBOUNDED FOLLOWING')
return SQL('%d FOLLOWING' % value)
@staticmethod
def preceding(value=None):
if value is None:
return SQL('UNBOUNDED PRECEDING')
return SQL('%d PRECEDING' % value)
def __sql__(self, ctx):
if ctx.scope != SCOPE_SOURCE:
ctx.literal(self._alias)
ctx.literal(' AS ')
with ctx(parentheses=True):
parts = []
if self.partition_by:
parts.extend((
SQL('PARTITION BY'),
CommaNodeList(self.partition_by)))
if self.order_by:
parts.extend((
SQL('ORDER BY'),
CommaNodeList(self.order_by)))
if self.start is not None and self.end is not None:
parts.extend((
SQL('ROWS BETWEEN'),
self.start,
SQL('AND'),
self.end))
elif self.start is not None:
parts.extend((SQL('ROWS'), self.start))
ctx.sql(NodeList(parts))
return ctx
def clone_base(self):
return Window(self.partition_by, self.order_by)
def Case(predicate, expression_tuples, default=None):
clauses = [SQL('CASE')]
if predicate is not None:
clauses.append(predicate)
for expr, value in expression_tuples:
clauses.extend((SQL('WHEN'), expr, SQL('THEN'), value))
if default is not None:
clauses.extend((SQL('ELSE'), default))
clauses.append(SQL('END'))
return NodeList(clauses)
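# Illustrative only: Case() builds a searched CASE expression as a NodeList,
# e.g.
#
#   Context().parse(Case(None, [(SQL('1 = 1'), 'y')], 'n'))
#   # -> ('CASE WHEN 1 = 1 THEN ? ELSE ? END', ['y', 'n'])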
class NodeList(ColumnBase):
def __init__(self, nodes, glue=' ', parens=False):
self.nodes = nodes
self.glue = glue
self.parens = parens
if parens and len(self.nodes) == 1:
if isinstance(self.nodes[0], Expression):
# Hack to avoid double-parentheses.
self.nodes[0].flat = True
def __sql__(self, ctx):
n_nodes = len(self.nodes)
if n_nodes == 0:
return ctx.literal('()') if self.parens else ctx
with ctx(parentheses=self.parens):
for i in range(n_nodes - 1):
ctx.sql(self.nodes[i])
ctx.literal(self.glue)
ctx.sql(self.nodes[n_nodes - 1])
return ctx
def CommaNodeList(nodes):
return NodeList(nodes, ', ')
def EnclosedNodeList(nodes):
return NodeList(nodes, ', ', True)
class DQ(ColumnBase):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
self._negated = False
@Node.copy
def __invert__(self):
self._negated = not self._negated
def clone(self):
node = DQ(**self.query)
node._negated = self._negated
return node
#: Represent a row tuple.
Tuple = lambda *a: EnclosedNodeList(a)
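# Illustrative only: Tuple() renders as a parenthesized, comma-separated row
# value, e.g.
#
#   Context().parse(Tuple(1, 2))  # -> ('(?, ?)', [1, 2])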
class QualifiedNames(WrappedNode):
def __sql__(self, ctx):
with ctx.scope_column():
return ctx.sql(self.node)
class OnConflict(Node):
def __init__(self, action=None, update=None, preserve=None, where=None,
conflict_target=None):
self._action = action
self._update = update
self._preserve = ensure_tuple(preserve)
self._where = where
self._conflict_target = ensure_tuple(conflict_target)
def get_conflict_statement(self, ctx):
return ctx.state.conflict_statement(self)
def get_conflict_update(self, ctx):
return ctx.state.conflict_update(self)
@Node.copy
def preserve(self, *columns):
self._preserve = columns
@Node.copy
def update(self, _data=None, **kwargs):
if _data and kwargs and not isinstance(_data, dict):
raise ValueError('Cannot mix data with keyword arguments in the '
'OnConflict update method.')
_data = _data or {}
if kwargs:
_data.update(kwargs)
self._update = _data
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def conflict_target(self, *constraints):
self._conflict_target = constraints
def database_required(method):
@wraps(method)
def inner(self, database=None, *args, **kwargs):
database = self._database if database is None else database
if not database:
raise Exception('Query must be bound to a database in order '
'to call "%s".' % method.__name__)
return method(self, database, *args, **kwargs)
return inner
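# Illustrative only: database_required() guards methods that need a bound
# database, so calling e.g. .execute() on a query that was never bound (and is
# given no database argument) raises immediately instead of failing later.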
# BASE QUERY INTERFACE.
class BaseQuery(Node):
default_row_type = ROW.DICT
def __init__(self, _database=None, **kwargs):
self._database = _database
self._cursor_wrapper = None
self._row_type = None
self._constructor = None
super(BaseQuery, self).__init__(**kwargs)
def bind(self, database=None):
self._database = database
return self
def clone(self):
query = super(BaseQuery, self).clone()
query._cursor_wrapper = None
return query
@Node.copy
def dicts(self, as_dict=True):
self._row_type = ROW.DICT if as_dict else None
return self
@Node.copy
def tuples(self, as_tuple=True):
self._row_type = ROW.TUPLE if as_tuple else None
return self
@Node.copy
def namedtuples(self, as_namedtuple=True):
self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None
return self
@Node.copy
def objects(self, constructor=None):
self._row_type = ROW.CONSTRUCTOR if constructor else None
self._constructor = constructor
return self
def _get_cursor_wrapper(self, cursor):
row_type = self._row_type or self.default_row_type
if row_type == ROW.DICT:
return DictCursorWrapper(cursor)
elif row_type == ROW.TUPLE:
return CursorWrapper(cursor)
elif row_type == ROW.NAMED_TUPLE:
return NamedTupleCursorWrapper(cursor)
elif row_type == ROW.CONSTRUCTOR:
return ObjectCursorWrapper(cursor, self._constructor)
else:
raise ValueError('Unrecognized row type: "%s".' % row_type)
def __sql__(self, ctx):
raise NotImplementedError
def sql(self):
if self._database:
context = self._database.get_sql_context()
else:
context = Context()
return context.parse(self)
@database_required
def execute(self, database):
return self._execute(database)
def _execute(self, database):
raise NotImplementedError
def iterator(self, database=None):
return iter(self.execute(database).iterator())
def _ensure_execution(self):
if not self._cursor_wrapper:
if not self._database:
raise ValueError('Query has not been executed.')
self.execute()
def __iter__(self):
self._ensure_execution()
return iter(self._cursor_wrapper)
def __getitem__(self, value):
self._ensure_execution()
if isinstance(value, slice):
index = value.stop
else:
index = value
if index is not None and index >= 0:
index += 1
self._cursor_wrapper.fill_cache(index)
return self._cursor_wrapper.row_cache[value]
def __len__(self):
self._ensure_execution()
return len(self._cursor_wrapper)
class RawQuery(BaseQuery):
def __init__(self, sql=None, params=None, **kwargs):
super(RawQuery, self).__init__(**kwargs)
self._sql = sql
self._params = params
def __sql__(self, ctx):
ctx.literal(self._sql)
if self._params:
for param in self._params:
if isinstance(param, Node):
ctx.sql(param)
else:
ctx.value(param)
return ctx
def _execute(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
class Query(BaseQuery):
def __init__(self, where=None, order_by=None, limit=None, offset=None,
**kwargs):
super(Query, self).__init__(**kwargs)
self._where = where
self._order_by = order_by
self._limit = limit
self._offset = offset
self._cte_list = None
@Node.copy
def with_cte(self, *cte_list):
self._cte_list = cte_list
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def order_by(self, *values):
self._order_by = values
@Node.copy
def order_by_extend(self, *values):
self._order_by = ((self._order_by or ()) + values) or None
@Node.copy
def limit(self, value=None):
self._limit = value
@Node.copy
def offset(self, value=None):
self._offset = value
@Node.copy
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
def _apply_ordering(self, ctx):
if self._order_by:
(ctx
.literal(' ORDER BY ')
.sql(CommaNodeList(self._order_by)))
if self._limit is not None or (self._offset is not None and
ctx.state.limit_max):
ctx.literal(' LIMIT %d' % (self._limit or ctx.state.limit_max))
if self._offset is not None:
ctx.literal(' OFFSET %d' % self._offset)
return ctx
def __sql__(self, ctx):
if self._cte_list:
# The CTE scope is only used at the very beginning of the query,
# when we are describing the various CTEs we will be using.
recursive = any(cte._recursive for cte in self._cte_list)
with ctx.scope_cte():
(ctx
.literal('WITH RECURSIVE ' if recursive else 'WITH ')
.sql(CommaNodeList(self._cte_list))
.literal(' '))
return ctx
def __compound_select__(operation, inverted=False):
def method(self, other):
if inverted:
self, other = other, self
return CompoundSelectQuery(self, operation, other)
return method
class SelectQuery(Query):
__add__ = __compound_select__('UNION ALL')
__or__ = __compound_select__('UNION')
__and__ = __compound_select__('INTERSECT')
__sub__ = __compound_select__('EXCEPT')
__radd__ = __compound_select__('UNION ALL', inverted=True)
__ror__ = __compound_select__('UNION', inverted=True)
__rand__ = __compound_select__('INTERSECT', inverted=True)
__rsub__ = __compound_select__('EXCEPT', inverted=True)
def cte(self, name, recursive=False, columns=None):
return CTE(name, self, recursive=recursive, columns=columns)
class SelectBase(_HashableSource, Source, SelectQuery):
def _get_hash(self):
return hash((self.__class__, self._alias or id(self)))
def _execute(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
@database_required
def peek(self, database, n=1):
rows = self.execute(database)[:n]
if rows:
return rows[0] if n == 1 else rows
@database_required
def first(self, database, n=1):
if self._limit != n:
self._limit = n
self._cursor_wrapper = None
return self.peek(database, n=n)
@database_required
def scalar(self, database, as_tuple=False):
row = self.tuples().peek(database)
return row[0] if row and not as_tuple else row
@database_required
def count(self, database, clear_limit=False):
clone = self.order_by().alias('_wrapped')
if clear_limit:
clone._limit = clone._offset = None
try:
if clone._having is None and clone._windows is None and \
clone._distinct is None and clone._simple_distinct is not True:
clone = clone.select(SQL('1'))
except AttributeError:
pass
return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database)
@database_required
def exists(self, database):
clone = self.columns(SQL('1'))
clone._limit = 1
clone._offset = None
return bool(clone.scalar())
@database_required
def get(self, database):
self._cursor_wrapper = None
try:
return self.execute(database)[0]
except IndexError:
pass
# QUERY IMPLEMENTATIONS.
class CompoundSelectQuery(SelectBase):
def __init__(self, lhs, op, rhs):
super(CompoundSelectQuery, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
@property
def _returning(self):
return self.lhs._returning
def _get_query_key(self):
return (self.lhs.get_query_key(), self.rhs.get_query_key())
def __sql__(self, ctx):
if ctx.scope == SCOPE_COLUMN:
return self.apply_column(ctx)
parens_around_query = ctx.state.compound_select_parentheses
outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE)
with ctx(parentheses=outer_parens):
with ctx.scope_normal(parentheses=parens_around_query,
subquery=False):
ctx.sql(self.lhs)
ctx.literal(' %s ' % self.op)
with ctx.push_alias():
with ctx.scope_normal(parentheses=parens_around_query,
subquery=False):
ctx.sql(self.rhs)
# Apply ORDER BY, LIMIT, OFFSET.
self._apply_ordering(ctx)
return self.apply_alias(ctx)
class Select(SelectBase):
def __init__(self, from_list=None, columns=None, group_by=None,
having=None, distinct=None, windows=None, for_update=None,
**kwargs):
super(Select, self).__init__(**kwargs)
self._from_list = (list(from_list) if isinstance(from_list, tuple)
else from_list) or []
self._returning = columns
self._group_by = group_by
self._having = having
self._windows = None
self._for_update = 'FOR UPDATE' if for_update is True else for_update
self._distinct = self._simple_distinct = None
if distinct:
if isinstance(distinct, bool):
self._simple_distinct = distinct
else:
self._distinct = distinct
self._cursor_wrapper = None
@Node.copy
def columns(self, *columns, **kwargs):
self._returning = columns
select = columns
@Node.copy
def select_extend(self, *columns):
self._returning = tuple(self._returning) + columns
@Node.copy
def from_(self, *sources):
self._from_list = list(sources)
@Node.copy
def join(self, dest, join_type='INNER', on=None):
if not self._from_list:
raise ValueError('No sources to join on.')
item = self._from_list.pop()
self._from_list.append(Join(item, dest, join_type, on))
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
@Node.copy
def group_by_extend(self, *values):
group_by = tuple(self._group_by or ()) + values
return self.group_by(*group_by)
@Node.copy
def having(self, *expressions):
if self._having is not None:
expressions = (self._having,) + expressions
self._having = reduce(operator.and_, expressions)
@Node.copy
def distinct(self, *columns):
if len(columns) == 1 and (columns[0] is True or columns[0] is False):
self._simple_distinct = columns[0]
else:
self._simple_distinct = False
self._distinct = columns
@Node.copy
def window(self, *windows):
self._windows = windows if windows else None
@Node.copy
def for_update(self, for_update=True):
self._for_update = 'FOR UPDATE' if for_update is True else for_update
def _get_query_key(self):
return self._alias
def __sql_selection__(self, ctx, is_subquery=False):
return ctx.sql(CommaNodeList(self._returning))
def __sql__(self, ctx):
super(Select, self).__sql__(ctx)
if ctx.scope == SCOPE_COLUMN:
return self.apply_column(ctx)
is_subquery = ctx.subquery
parentheses = is_subquery or (ctx.scope == SCOPE_SOURCE)
with ctx.scope_normal(converter=None, parentheses=parentheses,
subquery=True):
ctx.literal('SELECT ')
if self._simple_distinct or self._distinct is not None:
ctx.literal('DISTINCT ')
if self._distinct:
(ctx
.literal('ON ')
.sql(EnclosedNodeList(self._distinct))
.literal(' '))
with ctx.scope_source():
ctx = self.__sql_selection__(ctx, is_subquery)
if self._from_list:
with ctx.scope_source(parentheses=False):
ctx.literal(' FROM ').sql(CommaNodeList(self._from_list))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
if self._group_by:
ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by))
if self._having is not None:
ctx.literal(' HAVING ').sql(self._having)
if self._windows is not None:
ctx.literal(' WINDOW ')
ctx.sql(CommaNodeList(self._windows))
# Apply ORDER BY, LIMIT, OFFSET.
self._apply_ordering(ctx)
if self._for_update:
if not ctx.state.for_update:
raise ValueError('FOR UPDATE specified but not supported '
'by database.')
ctx.literal(' ')
ctx.sql(SQL(self._for_update))
ctx = self.apply_alias(ctx)
return ctx
class _WriteQuery(Query):
def __init__(self, table, returning=None, **kwargs):
self.table = table
self._returning = returning
self._return_cursor = True if returning else False
super(_WriteQuery, self).__init__(**kwargs)
@Node.copy
def returning(self, *returning):
self._returning = returning
self._return_cursor = True if returning else False
def apply_returning(self, ctx):
if self._returning:
ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning))
return ctx
def _execute(self, database):
if self._returning:
cursor = self.execute_returning(database)
else:
cursor = database.execute(self)
return self.handle_result(database, cursor)
def execute_returning(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
def handle_result(self, database, cursor):
if self._return_cursor:
return cursor
return database.rows_affected(cursor)
def _set_table_alias(self, ctx):
ctx.alias_manager[self.table] = self.table.__name__
def __sql__(self, ctx):
super(_WriteQuery, self).__sql__(ctx)
# We explicitly set the table alias to the table's name, which ensures
# that if a sub-select references a column on the outer table, we won't
# assign it a new alias (e.g. t2) but will refer to it as table.column.
self._set_table_alias(ctx)
return ctx
class Update(_WriteQuery):
def __init__(self, table, update=None, **kwargs):
super(Update, self).__init__(table, **kwargs)
self._update = update
self._from = None
@Node.copy
def from_(self, *sources):
self._from = sources
def __sql__(self, ctx):
super(Update, self).__sql__(ctx)
with ctx.scope_values(subquery=True):
ctx.literal('UPDATE ')
expressions = []
for k, v in sorted(self._update.items(), key=ctx.column_sort_key):
if not isinstance(v, Node):
converter = k.db_value if isinstance(k, Field) else None
v = Value(v, converter=converter, unpack=False)
expressions.append(NodeList((k, SQL('='), v)))
(ctx
.sql(self.table)
.literal(' SET ')
.sql(CommaNodeList(expressions)))
if self._from:
ctx.literal(' FROM ').sql(CommaNodeList(self._from))
if self._where:
ctx.literal(' WHERE ').sql(self._where)
self._apply_ordering(ctx)
return self.apply_returning(ctx)
class Insert(_WriteQuery):
SIMPLE = 0
QUERY = 1
MULTI = 2
class DefaultValuesException(Exception): pass
def __init__(self, table, insert=None, columns=None, on_conflict=None,
**kwargs):
super(Insert, self).__init__(table, **kwargs)
self._insert = insert
self._columns = columns
self._on_conflict = on_conflict
self._query_type = None
def where(self, *expressions):
raise NotImplementedError('INSERT queries cannot have a WHERE clause.')
@Node.copy
def on_conflict_ignore(self, ignore=True):
self._on_conflict = OnConflict('IGNORE') if ignore else None
@Node.copy
def on_conflict_replace(self, replace=True):
self._on_conflict = OnConflict('REPLACE') if replace else None
@Node.copy
def on_conflict(self, *args, **kwargs):
self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs)
else None)
def _simple_insert(self, ctx):
if not self._insert:
raise self.DefaultValuesException('Error: no data to insert.')
return self._generate_insert((self._insert,), ctx)
def get_default_data(self):
return {}
def _generate_insert(self, insert, ctx):
rows_iter = iter(insert)
columns = self._columns
# Load and organize column defaults (if provided).
defaults = self.get_default_data()
if not columns:
uses_strings = False
try:
row = next(rows_iter)
except StopIteration:
raise self.DefaultValuesException('Error: no rows to insert.')
else:
accum = []
value_lookups = {}
for key in row:
if isinstance(key, basestring):
column = getattr(self.table, key)
uses_strings = True
else:
column = key
accum.append(column)
value_lookups[column] = key
column_set = set(accum)
for column in (set(defaults) - column_set):
accum.append(column)
value_lookups[column] = column.name if uses_strings else column
columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx))
rows_iter = itertools.chain(iter((row,)), rows_iter)
else:
columns = list(columns)
value_lookups = dict((column, column) for column in columns)
for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)):
if col not in value_lookups:
columns.append(col)
value_lookups[col] = col
ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ')
columns_converters = [
(column, column.db_value if isinstance(column, Field) else None)
for column in columns]
all_values = []
for row in rows_iter:
values = []
            is_dict = isinstance(row, collections.abc.Mapping)
for i, (column, converter) in enumerate(columns_converters):
try:
if is_dict:
val = row[value_lookups[column]]
else:
val = row[i]
except (KeyError, IndexError):
if column in defaults:
val = defaults[column]
if callable(val):
val = val()
else:
raise ValueError('Missing value for "%s".' % column)
if not isinstance(val, Node):
val = Value(val, converter=converter, unpack=False)
values.append(val)
all_values.append(EnclosedNodeList(values))
with ctx.scope_values(subquery=True):
return ctx.sql(CommaNodeList(all_values))
def _query_insert(self, ctx):
return (ctx
.sql(EnclosedNodeList(self._columns))
.literal(' ')
.sql(self._insert))
def _default_values(self, ctx):
if not self._database:
return ctx.literal('DEFAULT VALUES')
return self._database.default_values_insert(ctx)
def __sql__(self, ctx):
super(Insert, self).__sql__(ctx)
with ctx.scope_values():
statement = None
if self._on_conflict is not None:
statement = self._on_conflict.get_conflict_statement(ctx)
(ctx
.sql(statement or SQL('INSERT'))
.literal(' INTO ')
.sql(self.table)
.literal(' '))
if isinstance(self._insert, dict) and not self._columns:
try:
self._simple_insert(ctx)
except self.DefaultValuesException:
self._default_values(ctx)
self._query_type = Insert.SIMPLE
elif isinstance(self._insert, SelectQuery):
self._query_insert(ctx)
self._query_type = Insert.QUERY
else:
try:
self._generate_insert(self._insert, ctx)
except self.DefaultValuesException:
return
self._query_type = Insert.MULTI
if self._on_conflict is not None:
update = self._on_conflict.get_conflict_update(ctx)
if update is not None:
ctx.literal(' ').sql(update)
return self.apply_returning(ctx)
def _execute(self, database):
if self._returning is None and database.returning_clause \
and self.table._primary_key:
self._returning = (self.table._primary_key,)
return super(Insert, self)._execute(database)
def handle_result(self, database, cursor):
if self._return_cursor:
return cursor
return database.last_insert_id(cursor, self._query_type)
class Delete(_WriteQuery):
def __sql__(self, ctx):
super(Delete, self).__sql__(ctx)
with ctx.scope_values(subquery=True):
ctx.literal('DELETE FROM ').sql(self.table)
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
self._apply_ordering(ctx)
return self.apply_returning(ctx)
class Index(Node):
def __init__(self, name, table, expressions, unique=False, safe=False,
where=None, using=None):
self._name = name
self._table = Entity(table) if not isinstance(table, Table) else table
self._expressions = expressions
self._where = where
self._unique = unique
self._safe = safe
self._using = using
@Node.copy
def safe(self, _safe=True):
self._safe = _safe
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def using(self, _using=None):
self._using = _using
def __sql__(self, ctx):
statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX '
with ctx.scope_values(subquery=True):
ctx.literal(statement)
if self._safe:
ctx.literal('IF NOT EXISTS ')
(ctx
.sql(Entity(self._name))
.literal(' ON ')
.sql(self._table)
.literal(' '))
if self._using is not None:
ctx.literal('USING %s ' % self._using)
ctx.sql(EnclosedNodeList([
SQL(expr) if isinstance(expr, basestring) else expr
for expr in self._expressions]))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
return ctx
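# ModelIndex builds an Index from model fields: the name is derived from
# the model and column names (truncated with an md5 suffix when it would
# exceed 64 characters), and a field's index_type, if set, supplies the
# USING clause.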
class ModelIndex(Index):
def __init__(self, model, fields, unique=False, safe=True, where=None,
using=None, name=None):
self._model = model
if name is None:
name = self._generate_name_from_fields(model, fields)
if using is None:
for field in fields:
if getattr(field, 'index_type', None):
using = field.index_type
super(ModelIndex, self).__init__(
name=name,
table=model._meta.table,
expressions=fields,
unique=unique,
safe=safe,
where=where,
using=using)
def _generate_name_from_fields(self, model, fields):
accum = []
for field in fields:
if isinstance(field, basestring):
accum.append(field.split()[0])
else:
if isinstance(field, Node) and not isinstance(field, Field):
field = field.unwrap()
if isinstance(field, Field):
accum.append(field.column_name)
if not accum:
raise ValueError('Unable to generate a name for the index, please '
'explicitly specify a name.')
index_name = re.sub(r'[^\w]+', '',
'%s_%s' % (model._meta.name, '_'.join(accum)))
if len(index_name) > 64:
index_hash = hashlib.md5(index_name.encode('utf-8')).hexdigest()
index_name = '%s_%s' % (index_name[:56], index_hash[:7])
return index_name
# DB-API 2.0 EXCEPTIONS.
class PeeweeException(Exception): pass
class ImproperlyConfigured(PeeweeException): pass
class DatabaseError(PeeweeException): pass
class DataError(DatabaseError): pass
class IntegrityError(DatabaseError): pass
class InterfaceError(PeeweeException): pass
class InternalError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
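# ExceptionWrapper is a context manager placed around all driver calls.
# When the wrapped block raises, the driver exception's class name is
# looked up in EXCEPTIONS and re-raised as the corresponding peewee
# exception above, preserving the original traceback.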
class ExceptionWrapper(object):
__slots__ = ('exceptions',)
def __init__(self, exceptions):
self.exceptions = exceptions
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
if exc_type.__name__ in self.exceptions:
new_type = self.exceptions[exc_type.__name__]
exc_args = exc_value.args
reraise(new_type, new_type(*exc_args), traceback)
EXCEPTIONS = {
'ConstraintError': IntegrityError,
'DatabaseError': DatabaseError,
'DataError': DataError,
'IntegrityError': IntegrityError,
'InterfaceError': InterfaceError,
'InternalError': InternalError,
'NotSupportedError': NotSupportedError,
'OperationalError': OperationalError,
'ProgrammingError': ProgrammingError}
__exception_wrapper__ = ExceptionWrapper(EXCEPTIONS)
# DATABASE INTERFACE AND CONNECTION MANAGEMENT.
IndexMetadata = collections.namedtuple(
'IndexMetadata',
('name', 'sql', 'columns', 'unique', 'table'))
ColumnMetadata = collections.namedtuple(
'ColumnMetadata',
('name', 'data_type', 'null', 'primary_key', 'table'))
ForeignKeyMetadata = collections.namedtuple(
'ForeignKeyMetadata',
('column', 'dest_table', 'dest_column', 'table'))
class _ConnectionState(object):
def __init__(self, **kwargs):
super(_ConnectionState, self).__init__(**kwargs)
self.reset()
def reset(self):
self.closed = True
self.conn = None
self.transactions = []
def set_connection(self, conn):
self.conn = conn
self.closed = False
class _ConnectionLocal(_ConnectionState, threading.local): pass
class _NoopLock(object):
__slots__ = ()
def __enter__(self): return self
def __exit__(self, exc_type, exc_val, exc_tb): pass
class ConnectionContext(_callable_context_manager):
__slots__ = ('db',)
def __init__(self, db): self.db = db
def __enter__(self):
if self.db.is_closed():
self.db.connect()
def __exit__(self, exc_type, exc_val, exc_tb): self.db.close()
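# Database encapsulates connection management (per-thread state when
# thread_safe=True), SQL-generation context options, transaction tracking
# and backend introspection. A minimal usage sketch (the model layer is
# defined further below):
#
#     db = SqliteDatabase('app.db')
#     db.connect()
#     cursor = db.execute_sql('SELECT 1')
#     db.close()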
class Database(_callable_context_manager):
context_class = Context
field_types = {}
operations = {}
param = '?'
quote = '"'
# Feature toggles.
commit_select = False
compound_select_parentheses = False
for_update = False
limit_max = None
returning_clause = False
safe_create_index = True
safe_drop_index = True
sequences = False
def __init__(self, database, thread_safe=True, autorollback=False,
field_types=None, operations=None, autocommit=None, **kwargs):
self._field_types = merge_dict(FIELD, self.field_types)
self._operations = merge_dict(OP, self.operations)
if field_types:
self._field_types.update(field_types)
if operations:
self._operations.update(operations)
self.autorollback = autorollback
self.thread_safe = thread_safe
if thread_safe:
self._state = _ConnectionLocal()
self._lock = threading.Lock()
else:
self._state = _ConnectionState()
self._lock = _NoopLock()
if autocommit is not None:
__deprecated__('Peewee no longer uses the "autocommit" option, as '
'the semantics now require it to always be True. '
'Because some database-drivers also use the '
'"autocommit" parameter, you are receiving a '
'warning so you may update your code and remove '
'the parameter, as in the future, specifying '
'autocommit could impact the behavior of the '
'database driver you are using.')
self.connect_params = {}
self.init(database, **kwargs)
def init(self, database, **kwargs):
if not self.is_closed():
self.close()
self.database = database
self.connect_params.update(kwargs)
self.deferred = not bool(database)
def __enter__(self):
if self.is_closed():
self.connect()
self.transaction().__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
top = self._state.transactions[-1]
try:
top.__exit__(exc_type, exc_val, exc_tb)
finally:
self.close()
def connection_context(self):
return ConnectionContext(self)
def _connect(self):
raise NotImplementedError
def connect(self, reuse_if_open=False):
with self._lock:
if self.deferred:
raise Exception('Error, database must be initialized before '
'opening a connection.')
if not self._state.closed:
if reuse_if_open:
return False
raise OperationalError('Connection already opened.')
self._state.reset()
with __exception_wrapper__:
self._state.set_connection(self._connect())
self._initialize_connection(self._state.conn)
return True
def _initialize_connection(self, conn):
pass
def close(self):
with self._lock:
if self.deferred:
raise Exception('Error, database must be initialized before '
'closing a connection.')
if self.in_transaction():
raise OperationalError('Attempting to close database while '
'transaction is open.')
is_open = not self._state.closed
try:
if is_open:
with __exception_wrapper__:
self._close(self._state.conn)
finally:
self._state.reset()
return is_open
def _close(self, conn):
conn.close()
def is_closed(self):
return self._state.closed
def connection(self):
if self.is_closed():
self.connect()
return self._state.conn
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor()
def execute_sql(self, sql, params=None, commit=SENTINEL):
logger.debug((sql, params))
if commit is SENTINEL:
if self.in_transaction():
commit = False
elif self.commit_select:
commit = True
else:
commit = not sql[:6].lower().startswith('select')
with __exception_wrapper__:
cursor = self.cursor(commit)
try:
cursor.execute(sql, params or ())
except Exception:
if self.autorollback and not self.in_transaction():
self.rollback()
raise
else:
if commit and not self.in_transaction():
self.commit()
return cursor
def execute(self, query, commit=SENTINEL, **context_options):
ctx = self.get_sql_context(**context_options)
sql, params = ctx.sql(query).query()
return self.execute_sql(sql, params, commit=commit)
def get_context_options(self):
return {
'field_types': self._field_types,
'operations': self._operations,
'param': self.param,
'quote': self.quote,
'compound_select_parentheses': self.compound_select_parentheses,
'conflict_statement': self.conflict_statement,
'conflict_update': self.conflict_update,
'for_update': self.for_update,
'limit_max': self.limit_max,
}
def get_sql_context(self, **context_options):
context = self.get_context_options()
if context_options:
context.update(context_options)
return self.context_class(**context)
def conflict_statement(self, on_conflict):
raise NotImplementedError
def conflict_update(self, on_conflict):
raise NotImplementedError
def last_insert_id(self, cursor, query_type=None):
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def default_values_insert(self, ctx):
return ctx.literal('DEFAULT VALUES')
def in_transaction(self):
return bool(self._state.transactions)
def push_transaction(self, transaction):
self._state.transactions.append(transaction)
def pop_transaction(self):
return self._state.transactions.pop()
def transaction_depth(self):
return len(self._state.transactions)
def top_transaction(self):
if self._state.transactions:
return self._state.transactions[-1]
def atomic(self):
return _atomic(self)
def manual_commit(self):
return _manual(self)
def transaction(self):
return _transaction(self)
def savepoint(self):
return _savepoint(self)
def begin(self):
if self.is_closed():
self.connect()
def commit(self):
return self._state.conn.commit()
def rollback(self):
return self._state.conn.rollback()
def batch_commit(self, it, n):
for group in chunked(it, n):
with self.atomic():
for obj in group:
yield obj
def table_exists(self, table, schema=None):
return table.__name__ in self.get_tables(schema=schema)
def get_tables(self, schema=None):
raise NotImplementedError
def get_indexes(self, table, schema=None):
raise NotImplementedError
def get_columns(self, table, schema=None):
raise NotImplementedError
def get_primary_keys(self, table, schema=None):
raise NotImplementedError
def get_foreign_keys(self, table, schema=None):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_tables(self, models, **options):
for model in sort_models(models):
model.create_table(**options)
def drop_tables(self, models, **kwargs):
for model in reversed(sort_models(models)):
model.drop_table(**kwargs)
def extract_date(self, date_part, date_field):
raise NotImplementedError
def truncate_date(self, date_part, date_field):
raise NotImplementedError
def bind(self, models, bind_refs=True, bind_backrefs=True):
for model in models:
model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs)
def bind_ctx(self, models, bind_refs=True, bind_backrefs=True):
return _BoundModelsContext(models, self, bind_refs, bind_backrefs)
def get_noop_select(self, ctx):
return ctx.sql(Select().columns(SQL('0')).where(SQL('0')))
def __pragma__(name):
def __get__(self):
return self.pragma(name)
def __set__(self, value):
return self.pragma(name, value)
return property(__get__, __set__)
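# SqliteDatabase layers SQLite-specific behavior on top of Database:
# PRAGMA management (see the __pragma__ properties below), registration of
# user-defined functions, aggregates, collations, table functions and
# loadable extensions, plus date_part/date_trunc helpers implemented in
# Python. Pragmas may be passed at construction time as pairs, e.g. (a
# sketch):
#
#     db = SqliteDatabase('app.db', pragmas=(('journal_mode', 'wal'),
#                                            ('cache_size', -1024 * 32)))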
class SqliteDatabase(Database):
field_types = {
'BIGAUTO': FIELD.AUTO,
'BIGINT': FIELD.INT,
'BOOL': FIELD.INT,
'DOUBLE': FIELD.FLOAT,
'SMALLINT': FIELD.INT,
'UUID': FIELD.TEXT}
operations = {
'LIKE': 'GLOB',
'ILIKE': 'LIKE'}
limit_max = -1
def __init__(self, database, *args, **kwargs):
self._pragmas = kwargs.pop('pragmas', ())
super(SqliteDatabase, self).__init__(database, *args, **kwargs)
self._aggregates = {}
self._collations = {}
self._functions = {}
self._table_functions = []
self._extensions = set()
self.register_function(_sqlite_date_part, 'date_part', 2)
self.register_function(_sqlite_date_trunc, 'date_trunc', 2)
def init(self, database, pragmas=None, timeout=5, **kwargs):
if pragmas is not None:
self._pragmas = pragmas
self._timeout = timeout
super(SqliteDatabase, self).init(database, **kwargs)
def _connect(self):
if sqlite3 is None:
raise ImproperlyConfigured('SQLite driver not installed!')
conn = sqlite3.connect(self.database, timeout=self._timeout,
**self.connect_params)
conn.isolation_level = None
try:
self._add_conn_hooks(conn)
except:
conn.close()
raise
return conn
def _add_conn_hooks(self, conn):
self._set_pragmas(conn)
self._load_aggregates(conn)
self._load_collations(conn)
self._load_functions(conn)
if self._table_functions:
for table_function in self._table_functions:
table_function.register(conn)
if self._extensions:
self._load_extensions(conn)
def _set_pragmas(self, conn):
if self._pragmas:
cursor = conn.cursor()
for pragma, value in self._pragmas:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
cursor.close()
def pragma(self, key, value=SENTINEL, permanent=False):
sql = 'PRAGMA %s' % key
if value is not SENTINEL:
sql += ' = %s' % (value or 0)
if permanent:
pragmas = dict(self._pragmas or ())
pragmas[key] = value
self._pragmas = list(pragmas.items())
elif permanent:
raise ValueError('Cannot specify a permanent pragma without value')
row = self.execute_sql(sql).fetchone()
if row:
return row[0]
cache_size = __pragma__('cache_size')
foreign_keys = __pragma__('foreign_keys')
journal_mode = __pragma__('journal_mode')
journal_size_limit = __pragma__('journal_size_limit')
mmap_size = __pragma__('mmap_size')
page_size = __pragma__('page_size')
read_uncommitted = __pragma__('read_uncommitted')
synchronous = __pragma__('synchronous')
wal_autocheckpoint = __pragma__('wal_autocheckpoint')
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, seconds):
if self._timeout == seconds:
return
self._timeout = seconds
if not self.is_closed():
self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000))
def _load_aggregates(self, conn):
for name, (klass, num_params) in self._aggregates.items():
conn.create_aggregate(name, num_params, klass)
def _load_collations(self, conn):
for name, fn in self._collations.items():
conn.create_collation(name, fn)
def _load_functions(self, conn):
for name, (fn, num_params) in self._functions.items():
conn.create_function(name, num_params, fn)
def register_aggregate(self, klass, name=None, num_params=-1):
self._aggregates[name or klass.__name__.lower()] = (klass, num_params)
if not self.is_closed():
self._load_aggregates(self.connection())
def aggregate(self, name=None, num_params=-1):
def decorator(klass):
self.register_aggregate(klass, name, num_params)
return klass
return decorator
def register_collation(self, fn, name=None):
name = name or fn.__name__
def _collation(*args):
expressions = args + (SQL('collate %s' % name),)
return NodeList(expressions)
fn.collation = _collation
self._collations[name] = fn
if not self.is_closed():
self._load_collations(self.connection())
def collation(self, name=None):
def decorator(fn):
self.register_collation(fn, name)
return fn
return decorator
def register_function(self, fn, name=None, num_params=-1):
self._functions[name or fn.__name__] = (fn, num_params)
if not self.is_closed():
self._load_functions(self.connection())
def func(self, name=None, num_params=-1):
def decorator(fn):
self.register_function(fn, name, num_params)
return fn
return decorator
def register_table_function(self, klass, name=None):
if name is not None:
klass.name = name
self._table_functions.append(klass)
if not self.is_closed():
klass.register(self.connection())
def table_function(self, name=None):
def decorator(klass):
self.register_table_function(klass, name)
return klass
return decorator
def unregister_aggregate(self, name):
del(self._aggregates[name])
def unregister_collation(self, name):
del(self._collations[name])
def unregister_function(self, name):
del(self._functions[name])
def unregister_table_function(self, name):
for idx, klass in enumerate(self._table_functions):
if klass.name == name:
break
else:
return False
self._table_functions.pop(idx)
return True
def _load_extensions(self, conn):
conn.enable_load_extension(True)
for extension in self._extensions:
conn.load_extension(extension)
def load_extension(self, extension):
self._extensions.add(extension)
if not self.is_closed():
conn = self.connection()
conn.enable_load_extension(True)
conn.load_extension(extension)
def unload_extension(self, extension):
self._extensions.remove(extension)
def atomic(self, lock_type=None):
return _atomic(self, lock_type=lock_type)
def transaction(self, lock_type=None):
return _transaction(self, lock_type=lock_type)
def begin(self, lock_type=None):
statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN'
self.execute_sql(statement, commit=False)
def get_tables(self, schema=None):
cursor = self.execute_sql('SELECT name FROM sqlite_master WHERE '
'type = ? ORDER BY name;', ('table',))
return [row for row, in cursor.fetchall()]
def get_indexes(self, table, schema=None):
query = ('SELECT name, sql FROM sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name')
cursor = self.execute_sql(query, (table, 'index'))
index_to_sql = dict(cursor.fetchall())
# Determine which indexes have a unique constraint.
unique_indexes = set()
cursor = self.execute_sql('PRAGMA index_list("%s")' % table)
for row in cursor.fetchall():
name = row[1]
is_unique = int(row[2]) == 1
if is_unique:
unique_indexes.add(name)
# Retrieve the indexed columns.
index_columns = {}
for index_name in sorted(index_to_sql):
cursor = self.execute_sql('PRAGMA index_info("%s")' % index_name)
index_columns[index_name] = [row[2] for row in cursor.fetchall()]
return [
IndexMetadata(
name,
index_to_sql[name],
index_columns[name],
name in unique_indexes,
table)
for name in sorted(index_to_sql)]
def get_columns(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [ColumnMetadata(row[1], row[2], not row[3], bool(row[5]), table)
for row in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA foreign_key_list("%s")' % table)
return [ForeignKeyMetadata(row[3], row[2], row[4], table)
for row in cursor.fetchall()]
def get_binary_type(self):
return sqlite3.Binary
def conflict_statement(self, on_conflict):
if on_conflict._action:
return SQL('INSERT OR %s' % on_conflict._action.upper())
def conflict_update(self, on_conflict):
if any((on_conflict._preserve, on_conflict._update, on_conflict._where,
on_conflict._conflict_target)):
raise ValueError('SQLite does not support specifying which values '
'to preserve or update.')
def extract_date(self, date_part, date_field):
return fn.date_part(date_part, date_field)
def truncate_date(self, date_part, date_field):
return fn.date_trunc(date_part, date_field)
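# PostgresqlDatabase (psycopg2) enables RETURNING clauses, FOR UPDATE and
# sequences, and implements upserts with ON CONFLICT. A conflict target is
# required when updating; "ignore" maps to ON CONFLICT DO NOTHING. A usage
# sketch (User, username and count are hypothetical model attributes):
#
#     (User
#      .insert(username='huey', count=1)
#      .on_conflict(conflict_target=(User.username,),
#                   update={User.count: User.count + 1})
#      .execute())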
class PostgresqlDatabase(Database):
field_types = {
'AUTO': 'SERIAL',
'BIGAUTO': 'BIGSERIAL',
'BLOB': 'BYTEA',
'BOOL': 'BOOLEAN',
'DATETIME': 'TIMESTAMP',
'DECIMAL': 'NUMERIC',
'DOUBLE': 'DOUBLE PRECISION',
'UUID': 'UUID'}
operations = {'REGEXP': '~'}
param = '%s'
commit_select = True
compound_select_parentheses = True
for_update = True
returning_clause = True
safe_create_index = False
sequences = True
def init(self, database, register_unicode=True, encoding=None, **kwargs):
self._register_unicode = register_unicode
self._encoding = encoding
self._need_server_version = True
super(PostgresqlDatabase, self).init(database, **kwargs)
def _connect(self):
if psycopg2 is None:
raise ImproperlyConfigured('Postgres driver not installed!')
conn = psycopg2.connect(database=self.database, **self.connect_params)
if self._register_unicode:
pg_extensions.register_type(pg_extensions.UNICODE, conn)
pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn)
if self._encoding:
conn.set_client_encoding(self._encoding)
if self._need_server_version:
self.set_server_version(conn.server_version)
self._need_server_version = False
return conn
def set_server_version(self, version):
if version >= 90600:
self.safe_create_index = True
def last_insert_id(self, cursor, query_type=None):
try:
return cursor if query_type else cursor[0][0]
except (IndexError, KeyError, TypeError):
pass
def get_tables(self, schema=None):
query = ('SELECT tablename FROM pg_catalog.pg_tables '
'WHERE schemaname = %s ORDER BY tablename')
cursor = self.execute_sql(query, (schema or 'public',))
return [table for table, in cursor.fetchall()]
def get_indexes(self, table, schema=None):
query = """
SELECT
i.relname, idxs.indexdef, idx.indisunique,
array_to_string(array_agg(cols.attname), ',')
FROM pg_catalog.pg_class AS t
INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid
INNER JOIN pg_catalog.pg_indexes AS idxs ON
(idxs.tablename = t.relname AND idxs.indexname = i.relname)
LEFT OUTER JOIN pg_catalog.pg_attribute AS cols ON
(cols.attrelid = t.oid AND cols.attnum = ANY(idx.indkey))
WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s
GROUP BY i.relname, idxs.indexdef, idx.indisunique
ORDER BY idx.indisunique DESC, i.relname;"""
cursor = self.execute_sql(query, (table, 'r', schema or 'public'))
return [IndexMetadata(row[0], row[1], row[3].split(','), row[2], table)
for row in cursor.fetchall()]
def get_columns(self, table, schema=None):
query = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = %s
ORDER BY ordinal_position"""
cursor = self.execute_sql(query, (table, schema or 'public'))
pks = set(self.get_primary_keys(table, schema))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
query = """
SELECT kc.column_name
FROM information_schema.table_constraints AS tc
INNER JOIN information_schema.key_column_usage AS kc ON (
tc.table_name = kc.table_name AND
tc.table_schema = kc.table_schema AND
tc.constraint_name = kc.constraint_name)
WHERE
tc.constraint_type = %s AND
tc.table_name = %s AND
tc.table_schema = %s"""
ctype = 'PRIMARY KEY'
cursor = self.execute_sql(query, (ctype, table, schema or 'public'))
return [pk for pk, in cursor.fetchall()]
def get_foreign_keys(self, table, schema=None):
sql = """
SELECT
kcu.column_name, ccu.table_name, ccu.column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON (tc.constraint_name = kcu.constraint_name AND
tc.constraint_schema = kcu.constraint_schema)
JOIN information_schema.constraint_column_usage AS ccu
ON (ccu.constraint_name = tc.constraint_name AND
ccu.constraint_schema = tc.constraint_schema)
WHERE
tc.constraint_type = 'FOREIGN KEY' AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(sql, (table, schema or 'public'))
return [ForeignKeyMetadata(row[0], row[1], row[2], table)
for row in cursor.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*) FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def get_binary_type(self):
return psycopg2.Binary
def conflict_statement(self, on_conflict):
return
def conflict_update(self, on_conflict):
action = on_conflict._action.lower() if on_conflict._action else ''
if action in ('ignore', 'nothing'):
return SQL('ON CONFLICT DO NOTHING')
elif action and action != 'update':
raise ValueError('The only supported actions for conflict '
'resolution with Postgresql are "ignore" or '
'"update".')
elif not on_conflict._update and not on_conflict._preserve:
raise ValueError('If you are not performing any updates (or '
'preserving any INSERTed values), then the '
'conflict resolution action should be set to '
'"IGNORE".')
elif not on_conflict._conflict_target:
raise ValueError('Postgres requires that a conflict target be '
'specified when doing an upsert.')
target = EnclosedNodeList([
Entity(col) if isinstance(col, basestring) else col
for col in on_conflict._conflict_target])
updates = []
if on_conflict._preserve:
for column in on_conflict._preserve:
excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)),
glue='.')
expression = NodeList((ensure_entity(column), SQL('='),
excluded))
updates.append(expression)
if on_conflict._update:
for k, v in on_conflict._update.items():
if not isinstance(v, Node):
converter = k.db_value if isinstance(k, Field) else None
v = Value(v, converter=converter, unpack=False)
else:
v = QualifiedNames(v)
updates.append(NodeList((ensure_entity(k), SQL('='), v)))
parts = [SQL('ON CONFLICT'),
target,
SQL('DO UPDATE SET'),
CommaNodeList(updates)]
if on_conflict._where:
parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where)))
return NodeList(parts)
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field)))
def truncate_date(self, date_part, date_field):
return fn.DATE_TRUNC(date_part, date_field)
def get_noop_select(self, ctx):
return ctx.sql(Select().columns(SQL('0')).where(SQL('false')))
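# MySQLDatabase uses backtick quoting and %s parameters. Conflict handling
# maps "replace" to REPLACE, "ignore" to INSERT IGNORE, and updates to
# ON DUPLICATE KEY UPDATE; WHERE clauses and explicit conflict targets are
# not supported by MySQL and raise ValueError.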
class MySQLDatabase(Database):
field_types = {
'AUTO': 'INTEGER AUTO_INCREMENT',
'BIGAUTO': 'BIGINT AUTO_INCREMENT',
'BOOL': 'BOOL',
'DECIMAL': 'NUMERIC',
'DOUBLE': 'DOUBLE PRECISION',
'FLOAT': 'FLOAT',
'UUID': 'VARCHAR(40)'}
operations = {
'LIKE': 'LIKE BINARY',
'ILIKE': 'LIKE',
'XOR': 'XOR'}
param = '%s'
quote = '`'
commit_select = True
for_update = True
limit_max = 2 ** 64 - 1
safe_create_index = False
safe_drop_index = False
def init(self, database, **kwargs):
params = {'charset': 'utf8', 'use_unicode': True}
params.update(kwargs)
if 'password' in params:
params['passwd'] = params.pop('password')
super(MySQLDatabase, self).init(database, **params)
def _connect(self):
if mysql is None:
raise ImproperlyConfigured('MySQL driver not installed!')
return mysql.connect(db=self.database, **self.connect_params)
def default_values_insert(self, ctx):
return ctx.literal('() VALUES ()')
def get_tables(self, schema=None):
return [table for table, in self.execute_sql('SHOW TABLES')]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in
filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
def get_binary_type(self):
return mysql.Binary
def conflict_statement(self, on_conflict):
if not on_conflict._action: return
action = on_conflict._action.lower()
if action == 'replace':
return SQL('REPLACE')
elif action == 'ignore':
return SQL('INSERT IGNORE')
elif action != 'update':
raise ValueError('Unsupported action for conflict resolution. '
'MySQL supports REPLACE, IGNORE and UPDATE.')
def conflict_update(self, on_conflict):
if on_conflict._where or on_conflict._conflict_target:
raise ValueError('MySQL does not support the specification of '
'where clauses or conflict targets for conflict '
'resolution.')
updates = []
if on_conflict._preserve:
for column in on_conflict._preserve:
entity = ensure_entity(column)
expression = NodeList((
ensure_entity(column),
SQL('='),
fn.VALUES(entity)))
updates.append(expression)
if on_conflict._update:
for k, v in on_conflict._update.items():
if not isinstance(v, Node):
converter = k.db_value if isinstance(k, Field) else None
v = Value(v, converter=converter, unpack=False)
updates.append(NodeList((ensure_entity(k), SQL('='), v)))
if updates:
return NodeList((SQL('ON DUPLICATE KEY UPDATE'),
CommaNodeList(updates)))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field)))
def truncate_date(self, date_part, date_field):
return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part])
def get_noop_select(self, ctx):
return ctx.literal('DO 0')
# TRANSACTION CONTROL.
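# _atomic picks the right mechanism for the current nesting depth: a real
# transaction at the top level and a savepoint when nested. _manual turns
# off automatic commit handling, and _savepoint wraps SAVEPOINT / RELEASE /
# ROLLBACK TO. Typical usage (a sketch; create_user is hypothetical):
#
#     with db.atomic():          # BEGIN ... COMMIT
#         create_user('huey')
#         with db.atomic():      # SAVEPOINT ... RELEASE
#             create_user('zaizee')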
class _manual(_callable_context_manager):
def __init__(self, db):
self.db = db
def __enter__(self):
top = self.db.top_transaction()
if top and not isinstance(self.db.top_transaction(), _manual):
raise ValueError('Cannot enter manual commit block while a '
'transaction is active.')
self.db.push_transaction(self)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.db.pop_transaction() is not self:
raise ValueError('Transaction stack corrupted while exiting '
'manual commit block.')
class _atomic(_callable_context_manager):
def __init__(self, db, lock_type=None):
self.db = db
self._lock_type = lock_type
self._transaction_args = (lock_type,) if lock_type is not None else ()
def __enter__(self):
if self.db.transaction_depth() == 0:
self._helper = self.db.transaction(*self._transaction_args)
else:
self._helper = self.db.savepoint()
if isinstance(self.db.top_transaction(), _manual):
raise ValueError('Cannot enter atomic commit block while in '
'manual commit mode.')
return self._helper.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._helper.__exit__(exc_type, exc_val, exc_tb)
class _transaction(_callable_context_manager):
def __init__(self, db, lock_type=None):
self.db = db
self._lock_type = lock_type
def _begin(self):
if self._lock_type:
self.db.begin(self._lock_type)
else:
self.db.begin()
def commit(self, begin=True):
self.db.commit()
if begin:
self._begin()
def rollback(self, begin=True):
self.db.rollback()
if begin:
self._begin()
def __enter__(self):
if self.db.transaction_depth() == 0:
self._begin()
self.db.push_transaction(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback(False)
elif self.db.transaction_depth() == 1:
try:
self.commit(False)
except:
self.rollback(False)
raise
finally:
self.db.pop_transaction()
class _savepoint(_callable_context_manager):
def __init__(self, db, sid=None):
self.db = db
self.sid = sid or 's' + uuid.uuid4().hex
self.quoted_sid = self.sid.join((self.db.quote, self.db.quote))
def _begin(self):
self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid)
def commit(self, begin=True):
self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid)
if begin: self._begin()
def rollback(self):
self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
def __enter__(self):
self._begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.rollback()
else:
try:
self.commit(begin=False)
except:
self.rollback()
raise
# CURSOR REPRESENTATIONS.
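# CursorWrapper caches rows as they are fetched so results can be iterated
# multiple times and indexed or sliced; iterator() bypasses the cache for
# one-pass, memory-efficient iteration. The Dict/NamedTuple/Object
# subclasses differ only in how process_row() shapes each row.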
class CursorWrapper(object):
def __init__(self, cursor):
self.cursor = cursor
self.count = 0
self.index = 0
self.initialized = False
self.populated = False
self.row_cache = []
def __iter__(self):
if self.populated:
return iter(self.row_cache)
return ResultIterator(self)
def __getitem__(self, item):
if isinstance(item, slice):
# TODO: getslice
start = item.start
stop = item.stop
if stop is None or stop < 0:
self.fill_cache()
else:
self.fill_cache(stop)
return self.row_cache[item]
elif isinstance(item, int):
self.fill_cache(item if item > 0 else 0)
return self.row_cache[item]
else:
raise ValueError('CursorWrapper only supports integer and slice '
'indexes.')
def __len__(self):
self.fill_cache()
return self.count
def initialize(self):
pass
def iterate(self, cache=True):
row = self.cursor.fetchone()
if row is None:
self.populated = True
self.cursor.close()
raise StopIteration
elif not self.initialized:
self.initialize() # Lazy initialization.
self.initialized = True
self.count += 1
result = self.process_row(row)
if cache:
self.row_cache.append(result)
return result
def process_row(self, row):
return row
def iterator(self):
"""Efficient one-pass iteration over the result set."""
while True:
    try:
        yield self.iterate(False)
    except StopIteration:
        return  # PEP 479: don't leak StopIteration from a generator.
def fill_cache(self, n=0.):
n = n or float('Inf')
if n < 0:
raise ValueError('Negative values are not supported.')
iterator = ResultIterator(self)
iterator.index = self.count
while not self.populated and (n > self.count):
try:
iterator.next()
except StopIteration:
break
class DictCursorWrapper(CursorWrapper):
def _initialize_columns(self):
description = self.cursor.description
self.columns = [t[0][t[0].find('.') + 1:]
for t in description]
self.ncols = len(description)
initialize = _initialize_columns
def _row_to_dict(self, row):
result = {}
for i in range(self.ncols):
result[self.columns[i]] = row[i]
return result
process_row = _row_to_dict
class NamedTupleCursorWrapper(CursorWrapper):
def initialize(self):
description = self.cursor.description
self.tuple_class = collections.namedtuple(
'Row',
[col[0][col[0].find('.') + 1:].strip('"') for col in description])
def process_row(self, row):
return self.tuple_class(*row)
class ObjectCursorWrapper(DictCursorWrapper):
def __init__(self, cursor, constructor):
super(ObjectCursorWrapper, self).__init__(cursor)
self.constructor = constructor
def process_row(self, row):
row_dict = self._row_to_dict(row)
return self.constructor(**row_dict)
class ResultIterator(object):
def __init__(self, cursor_wrapper):
self.cursor_wrapper = cursor_wrapper
self.index = 0
def __iter__(self):
return self
def next(self):
if self.index < self.cursor_wrapper.count:
obj = self.cursor_wrapper.row_cache[self.index]
elif not self.cursor_wrapper.populated:
self.cursor_wrapper.iterate()
obj = self.cursor_wrapper.row_cache[self.index]
else:
raise StopIteration
self.index += 1
return obj
__next__ = next
# FIELDS
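# FieldAccessor is the descriptor installed on model classes for each
# field: reads pull from instance.__data__, writes store the value and
# mark the field dirty. ForeignKeyAccessor additionally resolves and
# caches the related model instance in instance.__rel__ on first access.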
class FieldAccessor(object):
def __init__(self, model, field, name):
self.model = model
self.field = field
self.name = name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance.__data__.get(self.name)
return self.field
def __set__(self, instance, value):
instance.__data__[self.name] = value
instance._dirty.add(self.name)
class ForeignKeyAccessor(FieldAccessor):
def __init__(self, model, field, name):
super(ForeignKeyAccessor, self).__init__(model, field, name)
self.rel_model = field.rel_model
def get_rel_instance(self, instance):
value = instance.__data__.get(self.name)
if value is not None or self.name in instance.__rel__:
if self.name not in instance.__rel__:
obj = self.rel_model.get(self.field.rel_field == value)
instance.__rel__[self.name] = obj
return instance.__rel__[self.name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return value
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.get_rel_instance(instance)
return self.field
def __set__(self, instance, obj):
if isinstance(obj, self.rel_model):
instance.__data__[self.name] = getattr(obj, self.field.rel_field.name)
instance.__rel__[self.name] = obj
else:
fk_value = instance.__data__.get(self.name)
instance.__data__[self.name] = obj
if obj != fk_value and self.name in instance.__rel__:
del instance.__rel__[self.name]
instance._dirty.add(self.name)
class BackrefAccessor(object):
def __init__(self, field):
self.field = field
self.model = field.rel_model
self.rel_model = field.model
def __get__(self, instance, instance_type=None):
if instance is not None:
dest = self.field.rel_field.name
return (self.rel_model
.select()
.where(self.field == getattr(instance, dest)))
return self
class ObjectIdAccessor(object):
"""Gives direct access to the underlying id"""
def __init__(self, field):
self.field = field
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance.__data__.get(self.field.name)
return self.field
def __set__(self, instance, value):
setattr(instance, self.field.name, value)
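# Field is the base column type. db_value()/python_value() convert between
# Python and database representations (via coerce by default), ddl() and
# ddl_datatype() produce column definitions using the backend's field_types
# mapping, and bind() attaches the accessor descriptor to the model class.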
class Field(ColumnBase):
_field_counter = 0
_order = 0
accessor_class = FieldAccessor
auto_increment = False
field_type = 'DEFAULT'
def __init__(self, null=False, index=False, unique=False, column_name=None,
default=None, primary_key=False, constraints=None,
sequence=None, collation=None, unindexed=False, choices=None,
help_text=None, verbose_name=None, db_column=None,
_hidden=False):
if db_column is not None:
__deprecated__('"db_column" has been deprecated in favor of '
'"column_name" for Field objects.')
column_name = db_column
self.null = null
self.index = index
self.unique = unique
self.column_name = column_name
self.default = default
self.primary_key = primary_key
self.constraints = constraints # List of column constraints.
self.sequence = sequence # Name of sequence, e.g. foo_id_seq.
self.collation = collation
self.unindexed = unindexed
self.choices = choices
self.help_text = help_text
self.verbose_name = verbose_name
self._hidden = _hidden
# Used internally for recovering the order in which Fields were defined
# on the Model class.
Field._field_counter += 1
self._order = Field._field_counter
self._sort_key = (self.primary_key and 1 or 2), self._order
def __hash__(self):
return hash(self.name + '.' + self.model.__name__)
def bind(self, model, name, set_attribute=True):
self.model = model
self.name = name
self.column_name = self.column_name or name
if set_attribute:
setattr(model, name, self.accessor_class(model, self, name))
@property
def column(self):
return Column(self.model._meta.table, self.column_name)
def coerce(self, value):
return value
def db_value(self, value):
return value if value is None else self.coerce(value)
def python_value(self, value):
return value if value is None else self.coerce(value)
def get_sort_key(self, ctx):
return self._sort_key
def __sql__(self, ctx):
return ctx.sql(self.column)
def get_modifiers(self):
return
def ddl_datatype(self, ctx):
if ctx and ctx.state.field_types:
column_type = ctx.state.field_types.get(self.field_type,
self.field_type)
else:
column_type = self.field_type
modifiers = self.get_modifiers()
if column_type and modifiers:
modifier_literal = ', '.join([str(m) for m in modifiers])
return SQL('%s(%s)' % (column_type, modifier_literal))
else:
return SQL(column_type)
def ddl(self, ctx):
accum = [Entity(self.column_name)]
data_type = self.ddl_datatype(ctx)
if data_type:
accum.append(data_type)
if self.unindexed:
accum.append(SQL('UNINDEXED'))
if not self.null:
accum.append(SQL('NOT NULL'))
if self.primary_key:
accum.append(SQL('PRIMARY KEY'))
if self.sequence:
accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
accum.extend(self.constraints)
if self.collation:
accum.append(SQL('COLLATE %s' % self.collation))
return NodeList(accum)
class IntegerField(Field):
field_type = 'INT'
coerce = int
class BigIntegerField(IntegerField):
field_type = 'BIGINT'
class SmallIntegerField(IntegerField):
field_type = 'SMALLINT'
class AutoField(IntegerField):
auto_increment = True
field_type = 'AUTO'
def __init__(self, *args, **kwargs):
if kwargs.get('primary_key') is False:
raise ValueError('%s must always be a primary key.' % type(self))
kwargs['primary_key'] = True
super(AutoField, self).__init__(*args, **kwargs)
class BigAutoField(AutoField):
field_type = 'BIGAUTO'
class PrimaryKeyField(AutoField):
def __init__(self, *args, **kwargs):
__deprecated__('"PrimaryKeyField" has been renamed to "AutoField". '
'Please update your code accordingly as this will be '
'completely removed in a subsequent release.')
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
field_type = 'FLOAT'
coerce = float
class DoubleField(FloatField):
field_type = 'DOUBLE'
class DecimalField(Field):
field_type = 'DECIMAL'
def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
rounding=None, *args, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.auto_round = auto_round
self.rounding = rounding or decimal.DefaultContext.rounding
super(DecimalField, self).__init__(*args, **kwargs)
def get_modifiers(self):
return [self.max_digits, self.decimal_places]
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.auto_round:
exp = D(10) ** (-self.decimal_places)
rounding = self.rounding
return D(text_type(value)).quantize(exp, rounding=rounding)
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(text_type(value))
class _StringField(Field):
def coerce(self, value):
if isinstance(value, text_type):
return value
elif isinstance(value, bytes_type):
return value.decode('utf-8')
return text_type(value)
def __add__(self, other): return self.concat(other)
def __radd__(self, other): return other.concat(self)
class CharField(_StringField):
field_type = 'VARCHAR'
def __init__(self, max_length=255, *args, **kwargs):
self.max_length = max_length
super(CharField, self).__init__(*args, **kwargs)
def get_modifiers(self):
return self.max_length and [self.max_length] or None
class FixedCharField(CharField):
field_type = 'CHAR'
def python_value(self, value):
value = super(FixedCharField, self).python_value(value)
if value:
value = value.strip()
return value
class TextField(_StringField):
field_type = 'TEXT'
class BlobField(Field):
field_type = 'BLOB'
def bind(self, model, name, set_attribute=True):
self._constructor = bytearray
if model._meta.database:
if isinstance(model._meta.database, Proxy):
def cb(db):
self._constructor = db.get_binary_type()
model._meta.database.attach_callback(cb)
else:
self._constructor = model._meta.database.get_binary_type()
return super(BlobField, self).bind(model, name, set_attribute)
def db_value(self, value):
if isinstance(value, text_type):
value = value.encode('raw_unicode_escape')
if isinstance(value, bytes_type):
return self._constructor(value)
return value
class BitField(BitwiseMixin, BigIntegerField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', 0)
super(BitField, self).__init__(*args, **kwargs)
def flag(self, value):
class FlagDescriptor(object):
def __init__(self, field, value):
self._field = field
self._value = value
def __get__(self, instance, instance_type=None):
if instance is None:
return self._field.bin_and(self._value) != 0
value = getattr(instance, self._field.name) or 0
return (value & self._value) != 0
def __set__(self, instance, is_set):
if is_set not in (True, False):
raise ValueError('Value must be either True or False')
value = getattr(instance, self._field.name) or 0
if is_set:
value |= self._value
else:
value &= ~self._value
setattr(instance, self._field.name, value)
return FlagDescriptor(self, value)
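# BigBitFieldData exposes an arbitrarily large bitmap backed by a
# bytearray. Bit i lives at byte i // 8, offset i % 8 (so bit 11 is bit 3
# of byte 1), and the buffer grows on demand in _ensure_length().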
class BigBitFieldData(object):
def __init__(self, instance, name):
self.instance = instance
self.name = name
value = self.instance.__data__.get(self.name)
if not value:
value = bytearray()
elif not isinstance(value, bytearray):
value = bytearray(value)
self._buffer = self.instance.__data__[self.name] = value
def _ensure_length(self, idx):
byte_num, byte_offset = divmod(idx, 8)
cur_size = len(self._buffer)
if cur_size <= byte_num:
self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size))
return byte_num, byte_offset
def set_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] |= (1 << byte_offset)
def clear_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] &= ~(1 << byte_offset)
def toggle_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] ^= (1 << byte_offset)
return bool(self._buffer[byte_num] & (1 << byte_offset))
def is_set(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
return bool(self._buffer[byte_num] & (1 << byte_offset))
def __repr__(self):
return repr(self._buffer)
class BigBitFieldAccessor(FieldAccessor):
def __get__(self, instance, instance_type=None):
if instance is None:
return self.field
return BigBitFieldData(instance, self.name)
def __set__(self, instance, value):
if isinstance(value, memoryview):
value = value.tobytes()
elif isinstance(value, buffer_type):
value = bytes(value)
elif isinstance(value, bytearray):
value = bytes_type(value)
elif isinstance(value, BigBitFieldData):
value = bytes_type(value._buffer)
elif isinstance(value, text_type):
value = value.encode('utf-8')
elif not isinstance(value, bytes_type):
raise ValueError('Value must be bytes, a memoryview or a '
'BigBitFieldData instance.')
super(BigBitFieldAccessor, self).__set__(instance, value)
class BigBitField(BlobField):
accessor_class = BigBitFieldAccessor
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', bytes_type)
super(BigBitField, self).__init__(*args, **kwargs)
def db_value(self, value):
return bytes_type(value) if value is not None else value
class UUIDField(Field):
field_type = 'UUID'
def db_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
try:
return uuid.UUID(value).hex
except:
return value
def python_value(self, value):
if isinstance(value, uuid.UUID):
return value
return None if value is None else uuid.UUID(value)
def _date_part(date_part):
def dec(self):
return self.model._meta.database.extract_date(date_part, self)
return dec
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
class _BaseFormattedField(Field):
formats = None
def __init__(self, formats=None, *args, **kwargs):
if formats is not None:
self.formats = formats
super(_BaseFormattedField, self).__init__(*args, **kwargs)
class DateTimeField(_BaseFormattedField):
field_type = 'DATETIME'
formats = [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.formats)
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class DateField(_BaseFormattedField):
field_type = 'DATE'
formats = [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
class TimeField(_BaseFormattedField):
field_type = 'TIME'
formats = [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
def python_value(self, value):
if value:
if isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.formats, pp)
elif isinstance(value, datetime.datetime):
return value.time()
if value is not None and isinstance(value, datetime.timedelta):
return (datetime.datetime.min + value).time()
return value
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
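# TimestampField stores datetimes as integers. "resolution" (a power of
# ten, up to 10**6) scales the stored value so sub-second precision can be
# kept, and utc=True makes both conversion directions use UTC.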
class TimestampField(IntegerField):
# Support second -> microsecond resolution.
valid_resolutions = [10**i for i in range(7)]
def __init__(self, *args, **kwargs):
self.resolution = kwargs.pop('resolution', 1) or 1
if self.resolution not in self.valid_resolutions:
raise ValueError('TimestampField resolution must be one of: %s' %
', '.join(str(i) for i in self.valid_resolutions))
self.utc = kwargs.pop('utc', False) or False
_dt = datetime.datetime
self._conv = _dt.utcfromtimestamp if self.utc else _dt.fromtimestamp
_default = _dt.utcnow if self.utc else _dt.now
kwargs.setdefault('default', _default)
super(TimestampField, self).__init__(*args, **kwargs)
def db_value(self, value):
if value is None:
return
if isinstance(value, datetime.datetime):
pass
elif isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
else:
return int(round(value * self.resolution))
if self.utc:
timestamp = calendar.timegm(value.utctimetuple())
else:
timestamp = time.mktime(value.timetuple())
timestamp += (value.microsecond * .000001)
if self.resolution > 1:
timestamp *= self.resolution
return int(round(timestamp))
def python_value(self, value):
if value is not None and isinstance(value, (int, float, long)):
if value == 0:
return
elif self.resolution > 1:
ticks_to_microsecond = 1000000 // self.resolution
value, ticks = divmod(value, self.resolution)
microseconds = ticks * ticks_to_microsecond
return self._conv(value).replace(microsecond=microseconds)
else:
return self._conv(value)
return value
class IPField(BigIntegerField):
def db_value(self, val):
if val is not None:
return struct.unpack('!I', socket.inet_aton(val))[0]
def python_value(self, val):
if val is not None:
return socket.inet_ntoa(struct.pack('!I', val))
class BooleanField(Field):
field_type = 'BOOL'
coerce = bool
class BareField(Field):
def __init__(self, coerce=None, *args, **kwargs):
super(BareField, self).__init__(*args, **kwargs)
if coerce is not None:
self.coerce = coerce
def ddl(self, ctx):
return Entity(self.column_name)
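# ForeignKeyField stores the related row's key in a column that defaults
# to "<name>_id". bind() also installs an ObjectIdAccessor under
# object_id_name for raw id access and a BackrefAccessor on the related
# model (default backref: "<model>_set"). Passing 'self' as the model
# creates a self-referential key.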
class ForeignKeyField(Field):
accessor_class = ForeignKeyAccessor
def __init__(self, model, field=None, backref=None, on_delete=None,
on_update=None, _deferred=None, rel_model=None, to_field=None,
object_id_name=None, related_name=None, *args, **kwargs):
super(ForeignKeyField, self).__init__(*args, **kwargs)
if rel_model is not None:
__deprecated__('"rel_model" has been deprecated in favor of '
'"model" for ForeignKeyField objects.')
model = rel_model
if to_field is not None:
__deprecated__('"to_field" has been deprecated in favor of '
'"field" for ForeignKeyField objects.')
field = to_field
if related_name is not None:
__deprecated__('"related_name" has been deprecated in favor of '
'"backref" for Field objects.')
backref = related_name
self.rel_model = model
self.rel_field = field
self.declared_backref = backref
self.backref = None
self.on_delete = on_delete
self.on_update = on_update
self.deferred = _deferred
self.object_id_name = object_id_name
def __repr__(self):
if hasattr(self, 'model') and getattr(self, 'name', None):
return '<%s: "%s"."%s">' % (type(self).__name__,
self.model._meta.name,
self.name)
return '<%s: (unbound)>' % type(self).__name__
@property
def field_type(self):
if not isinstance(self.rel_field, AutoField):
return self.rel_field.field_type
return IntegerField.field_type
def get_modifiers(self):
if not isinstance(self.rel_field, AutoField):
return self.rel_field.get_modifiers()
return super(ForeignKeyField, self).get_modifiers()
def coerce(self, value):
return self.rel_field.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value.get_id()
return self.rel_field.db_value(value)
def python_value(self, value):
if isinstance(value, self.rel_model):
return value
return self.rel_field.python_value(value)
def expression(self):
return self.column == self.rel_field.column
def bind(self, model, name, set_attribute=True):
if not self.column_name:
self.column_name = name if name.endswith('_id') else name + '_id'
if not self.object_id_name:
self.object_id_name = self.column_name
if self.object_id_name == name:
self.object_id_name += '_id'
elif self.object_id_name == name:
raise ValueError('ForeignKeyField "%s"."%s" specifies an '
'object_id_name that conflicts with its field '
'name.' % (model._meta.name, name))
if self.rel_model == 'self':
self.rel_model = model
if isinstance(self.rel_field, basestring):
self.rel_field = getattr(self.rel_model, self.rel_field)
elif self.rel_field is None:
self.rel_field = self.rel_model._meta.primary_key
# Bind field before assigning backref, so field is bound when
# calling declared_backref() (if callable).
super(ForeignKeyField, self).bind(model, name, set_attribute)
if callable(self.declared_backref):
self.backref = self.declared_backref(self)
else:
self.backref, self.declared_backref = self.declared_backref, None
if not self.backref:
self.backref = '%s_set' % model._meta.name
if set_attribute:
setattr(model, self.object_id_name, ObjectIdAccessor(self))
if self.backref not in '!+':
setattr(self.rel_model, self.backref, BackrefAccessor(self))
def foreign_key_constraint(self):
parts = [
SQL('FOREIGN KEY'),
EnclosedNodeList((self,)),
SQL('REFERENCES'),
self.rel_model,
EnclosedNodeList((self.rel_field,))]
if self.on_delete:
parts.append(SQL('ON DELETE %s' % self.on_delete))
if self.on_update:
parts.append(SQL('ON UPDATE %s' % self.on_update))
return NodeList(parts)
def __getattr__(self, attr):
if attr.startswith('__'):
# Prevent recursion error when deep-copying.
raise AttributeError('Cannot look up non-existent "__" methods.')
if attr in self.rel_model._meta.fields:
return self.rel_model._meta.fields[attr]
raise AttributeError('%r has no attribute %s, nor is it a valid field '
'on %s.' % (self, attr, self.rel_model))
class DeferredForeignKey(Field):
_unresolved = set()
def __init__(self, rel_model_name, **kwargs):
self.field_kwargs = kwargs
self.rel_model_name = rel_model_name.lower()
DeferredForeignKey._unresolved.add(self)
super(DeferredForeignKey, self).__init__()
__hash__ = object.__hash__
def set_model(self, rel_model):
field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs)
self.model._meta.add_field(self.name, field)
@staticmethod
def resolve(model_cls):
unresolved = list(DeferredForeignKey._unresolved)
for dr in unresolved:
if dr.rel_model_name == model_cls.__name__.lower():
dr.set_model(model_cls)
DeferredForeignKey._unresolved.discard(dr)
class DeferredThroughModel(object):
def set_field(self, model, field, name):
self.model = model
self.field = field
self.name = name
def set_model(self, through_model):
self.field.through_model = through_model
self.field.bind(self.model, self.name)
class MetaField(Field):
column_name = default = model = name = None
primary_key = False
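# ManyToManyField is implemented with a "through" model: if none is given,
# get_through_model() builds one named <Lhs><Rhs>Through with a foreign key
# to each side and a unique index over the pair. The accessor returns a
# ManyToManyQuery; assigning to the attribute replaces the related set.
# Usage sketch (Student, Course, huey and math are hypothetical):
#
#     class Student(Model):
#         courses = ManyToManyField(Course, backref='students')
#
#     huey.courses.add(math)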
class ManyToManyFieldAccessor(FieldAccessor):
def __init__(self, model, field, name):
super(ManyToManyFieldAccessor, self).__init__(model, field, name)
self.model = field.model
self.rel_model = field.rel_model
self.through_model = field.get_through_model()
self.src_fk = self.through_model._meta.model_refs[self.model][0]
self.dest_fk = self.through_model._meta.model_refs[self.rel_model][0]
def __get__(self, instance, instance_type=None, force_query=False):
if instance is not None:
if not force_query and isinstance(getattr(instance, self.src_fk.backref), list):
return [getattr(obj, self.dest_fk.name) for obj in getattr(instance, self.src_fk.backref)]
else:
return (ManyToManyQuery(instance, self, self.rel_model)
.join(self.through_model)
.join(self.model)
.where(self.src_fk == instance))
return self.field
def __set__(self, instance, value):
query = self.__get__(instance, force_query=True)
query.add(value, clear_existing=True)
class ManyToManyField(MetaField):
accessor_class = ManyToManyFieldAccessor
def __init__(self, model, backref=None, through_model=None,
_is_backref=False):
if through_model is not None and not (
isinstance(through_model, DeferredThroughModel) or
is_model(through_model)):
raise TypeError('Unexpected value for through_model. Expected '
'Model or DeferredThroughModel.')
self.rel_model = model
self.backref = backref
self.through_model = through_model
self._is_backref = _is_backref
def _get_descriptor(self):
return ManyToManyFieldAccessor(self)
def bind(self, model, name, set_attribute=True):
if isinstance(self.through_model, DeferredThroughModel):
self.through_model.set_field(model, self, name)
return
super(ManyToManyField, self).bind(model, name, set_attribute)
if not self._is_backref:
many_to_many_field = ManyToManyField(
self.model,
through_model=self.through_model,
_is_backref=True)
backref = self.backref or model._meta.name + 's'
self.rel_model._meta.add_field(backref, many_to_many_field)
def get_models(self):
return [model for _, model in sorted((
(self._is_backref, self.model),
(not self._is_backref, self.rel_model)))]
def get_through_model(self):
if not self.through_model:
lhs, rhs = self.get_models()
tables = [model._meta.table_name for model in (lhs, rhs)]
class Meta:
database = self.model._meta.database
table_name = '%s_%s_through' % tuple(tables)
indexes = (
((lhs._meta.name, rhs._meta.name),
True),)
attrs = {
lhs._meta.name: ForeignKeyField(lhs),
rhs._meta.name: ForeignKeyField(rhs)}
attrs['Meta'] = Meta
self.through_model = type(
'%s%sThrough' % (lhs.__name__, rhs.__name__),
(Model,),
attrs)
return self.through_model
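# Illustrative sketch (comments only): ManyToManyField generates an implicit
# "through" model when none is supplied. The Student/Course models below are
# hypothetical.
#
#     class Student(Model):
#         name = TextField()
#
#     class Course(Model):
#         name = TextField()
#         students = ManyToManyField(Student, backref='courses')
#
#     StudentCourse = Course.students.get_through_model()
#     # StudentCourse carries a ForeignKeyField to each side plus a unique
#     # index over the pair, per the Meta constructed above.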
class VirtualField(MetaField):
field_class = None
def __init__(self, field_class=None, *args, **kwargs):
Field = field_class if field_class is not None else self.field_class
self.field_instance = Field() if Field is not None else None
super(VirtualField, self).__init__(*args, **kwargs)
def db_value(self, value):
if self.field_instance is not None:
return self.field_instance.db_value(value)
return value
def python_value(self, value):
if self.field_instance is not None:
return self.field_instance.python_value(value)
return value
def bind(self, model, name, set_attribute=True):
self.model = model
self.column_name = self.name = name
setattr(model, name, self.accessor_class(model, self, name))
class CompositeKey(MetaField):
sequence = None
def __init__(self, *field_names):
self.field_names = field_names
def __get__(self, instance, instance_type=None):
if instance is not None:
return tuple([getattr(instance, field_name)
for field_name in self.field_names])
return self
def __set__(self, instance, value):
if not isinstance(value, (list, tuple)):
raise TypeError('A list or tuple must be used to set the value of '
'a composite primary key.')
if len(value) != len(self.field_names):
raise ValueError('The length of the value must equal the number '
'of columns of the composite primary key.')
for idx, field_value in enumerate(value):
setattr(instance, self.field_names[idx], field_value)
def __eq__(self, other):
expressions = [(self.model._meta.fields[field] == value)
for field, value in zip(self.field_names, other)]
return reduce(operator.and_, expressions)
def __ne__(self, other):
return ~(self == other)
def __hash__(self):
return hash((self.model.__name__, self.field_names))
def __sql__(self, ctx):
return ctx.sql(CommaNodeList([self.model._meta.fields[field]
for field in self.field_names]))
def bind(self, model, name, set_attribute=True):
self.model = model
self.column_name = self.name = name
setattr(model, self.name, self)
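# Illustrative sketch (comments only): CompositeKey is declared through
# Meta.primary_key and reads/writes a tuple of the named fields. The model
# below is hypothetical.
#
#     class LineItem(Model):
#         order_id = IntegerField()
#         product_id = IntegerField()
#         quantity = IntegerField()
#         class Meta:
#             primary_key = CompositeKey('order_id', 'product_id')
#
#     item = LineItem(order_id=1, product_id=2, quantity=3)
#     item._pk                                  # -> (1, 2)
#     LineItem._meta.primary_key == (1, 2)      # -> AND-ed field expressions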
class _SortedFieldList(object):
__slots__ = ('_keys', '_items')
def __init__(self):
self._keys = []
self._items = []
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __contains__(self, item):
k = item._sort_key
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, field):
return self._keys.index(field._sort_key)
def insert(self, item):
k = item._sort_key
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
idx = self.index(item)
del self._items[idx]
del self._keys[idx]
# MODELS
class SchemaManager(object):
def __init__(self, model, database=None, **context_options):
self.model = model
self._database = database
context_options.setdefault('scope', SCOPE_VALUES)
self.context_options = context_options
@property
def database(self):
return self._database or self.model._meta.database
@database.setter
def database(self, value):
self._database = value
def _create_context(self):
return self.database.get_sql_context(**self.context_options)
def _create_table(self, safe=True, **options):
is_temp = options.pop('temporary', False)
ctx = self._create_context()
ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ')
if safe:
ctx.literal('IF NOT EXISTS ')
ctx.sql(self.model).literal(' ')
columns = []
constraints = []
meta = self.model._meta
if meta.composite_key:
pk_columns = [meta.fields[field_name].column
for field_name in meta.primary_key.field_names]
constraints.append(NodeList((SQL('PRIMARY KEY'),
EnclosedNodeList(pk_columns))))
for field in meta.sorted_fields:
columns.append(field.ddl(ctx))
if isinstance(field, ForeignKeyField) and not field.deferred:
constraints.append(field.foreign_key_constraint())
if meta.constraints:
constraints.extend(meta.constraints)
constraints.extend(self._create_table_option_sql(options))
ctx.sql(EnclosedNodeList(columns + constraints))
if meta.without_rowid:
ctx.literal(' WITHOUT ROWID')
return ctx
def _create_table_option_sql(self, options):
accum = []
options = merge_dict(self.model._meta.options or {}, options)
if not options:
return accum
for key, value in sorted(options.items()):
if not isinstance(value, Node):
if is_model(value):
value = value._meta.table
else:
value = SQL(value)
accum.append(NodeList((SQL(key), value), glue='='))
return accum
def create_table(self, safe=True, **options):
self.database.execute(self._create_table(safe=safe, **options))
def _drop_table(self, safe=True, **options):
is_temp = options.pop('temporary', False)
ctx = (self._create_context()
.literal('DROP TEMPORARY ' if is_temp else 'DROP ')
.literal('TABLE IF EXISTS ' if safe else 'TABLE ')
.sql(self.model))
if options.get('cascade'):
ctx = ctx.literal(' CASCADE')
return ctx
def drop_table(self, safe=True, **options):
        self.database.execute(self._drop_table(safe=safe, **options))
def _create_indexes(self, safe=True):
return [self._create_index(index, safe)
for index in self.model._meta.fields_to_index()]
def _create_index(self, index, safe=True):
if isinstance(index, Index):
if not self.database.safe_create_index:
index = index.safe(False)
elif index._safe != safe:
index = index.safe(safe)
return self._create_context().sql(index)
def create_indexes(self, safe=True):
for query in self._create_indexes(safe=safe):
self.database.execute(query)
def _drop_indexes(self, safe=True):
return [self._drop_index(index, safe)
for index in self.model._meta.fields_to_index()
if isinstance(index, Index)]
def _drop_index(self, index, safe):
statement = 'DROP INDEX '
if safe and self.database.safe_drop_index:
statement += 'IF EXISTS '
return (self
._create_context()
.literal(statement)
.sql(Entity(index._name)))
def drop_indexes(self, safe=True):
for query in self._drop_indexes(safe=safe):
self.database.execute(query)
def _check_sequences(self, field):
if not field.sequence or not self.database.sequences:
raise ValueError('Sequences are either not supported, or are not '
'defined for "%s".' % field.name)
def _create_sequence(self, field):
self._check_sequences(field)
if not self.database.sequence_exists(field.sequence):
return (self
._create_context()
.literal('CREATE SEQUENCE ')
.sql(Entity(field.sequence)))
def create_sequence(self, field):
self.database.execute(self._create_sequence(field))
def _drop_sequence(self, field):
self._check_sequences(field)
if self.database.sequence_exists(field.sequence):
return (self
._create_context()
.literal('DROP SEQUENCE ')
.sql(Entity(field.sequence)))
def drop_sequence(self, field):
self.database.execute(self._drop_sequence(field))
def _create_foreign_key(self, field):
name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name,
field.column_name,
field.rel_model._meta.table_name)
return (self
._create_context()
.literal('ALTER TABLE ')
.sql(field.model)
.literal(' ADD CONSTRAINT ')
.sql(Entity(name))
.literal(' ')
.sql(field.foreign_key_constraint()))
def create_foreign_key(self, field):
self.database.execute(self._create_foreign_key(field))
def create_all(self, safe=True, **table_options):
if self.database.sequences:
for field in self.model._meta.sorted_fields:
if field and field.sequence:
self.create_sequence(field)
self.create_table(safe, **table_options)
self.create_indexes(safe=safe)
def drop_all(self, safe=True, **options):
self.drop_table(safe, **options)
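# Illustrative sketch (comments only): SchemaManager is normally reached via
# Model._schema (assigned in ModelBase.__new__ below); the Model helpers
# create_table()/drop_table() simply delegate to it. Model name hypothetical.
#
#     User.create_table()              # -> User._schema.create_all(safe=True)
#     User._schema.create_indexes()
#     User.drop_table(cascade=True)    # CASCADE only where the DB supports it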
class Metadata(object):
def __init__(self, model, database=None, table_name=None, indexes=None,
primary_key=None, constraints=None, schema=None,
only_save_dirty=False, table_alias=None, depends_on=None,
options=None, db_table=None, table_function=None,
without_rowid=False, **kwargs):
if db_table is not None:
__deprecated__('"db_table" has been deprecated in favor of '
'"table_name" for Models.')
table_name = db_table
self.model = model
self.database = database
self.fields = {}
self.columns = {}
self.combined = {}
self._sorted_field_list = _SortedFieldList()
self.sorted_fields = []
self.sorted_field_names = []
self.defaults = {}
self._default_by_name = {}
self._default_dict = {}
self._default_callables = {}
self._default_callable_list = []
self.name = model.__name__.lower()
self.table_function = table_function
if not table_name:
table_name = (self.table_function(model)
if self.table_function
                          else re.sub(r'[^\w]+', '_', self.name))
self.table_name = table_name
self._table = None
self.indexes = list(indexes) if indexes else []
self.constraints = constraints
self._schema = schema
self.primary_key = primary_key
self.composite_key = self.auto_increment = None
self.only_save_dirty = only_save_dirty
self.table_alias = table_alias
self.depends_on = depends_on
self.without_rowid = without_rowid
self.refs = {}
self.backrefs = {}
self.model_refs = collections.defaultdict(list)
self.model_backrefs = collections.defaultdict(list)
self.manytomany = {}
self.options = options or {}
for key, value in kwargs.items():
setattr(self, key, value)
self._additional_keys = set(kwargs.keys())
def model_graph(self, refs=True, backrefs=True, depth_first=True):
if not refs and not backrefs:
raise ValueError('One of `refs` or `backrefs` must be True.')
accum = [(None, self.model, None)]
seen = set()
queue = collections.deque((self,))
method = queue.pop if depth_first else queue.popleft
while queue:
curr = method()
if curr in seen: continue
seen.add(curr)
if refs:
for fk, model in curr.refs.items():
accum.append((fk, model, False))
queue.append(model._meta)
if backrefs:
for fk, model in curr.backrefs.items():
accum.append((fk, model, True))
queue.append(model._meta)
return accum
def add_ref(self, field):
rel = field.rel_model
self.refs[field] = rel
self.model_refs[rel].append(field)
rel._meta.backrefs[field] = self.model
rel._meta.model_backrefs[self.model].append(field)
def remove_ref(self, field):
rel = field.rel_model
del self.refs[field]
        self.model_refs[rel].remove(field)
del rel._meta.backrefs[field]
rel._meta.model_backrefs[self.model].remove(field)
def add_manytomany(self, field):
self.manytomany[field.name] = field
def remove_manytomany(self, field):
del self.manytomany[field.name]
@property
def table(self):
if self._table is None:
self._table = Table(
self.table_name,
[field.column_name for field in self.sorted_fields],
schema=self.schema,
alias=self.table_alias,
_model=self.model,
_database=self.database)
return self._table
@table.setter
def table(self, value):
raise AttributeError('Cannot set the "table".')
@table.deleter
def table(self):
self._table = None
@property
def schema(self):
return self._schema
@schema.setter
def schema(self, value):
self._schema = value
del self.table
@property
def entity(self):
if self._schema:
return Entity(self._schema, self.table_name)
else:
return Entity(self.table_name)
def _update_sorted_fields(self):
self.sorted_fields = list(self._sorted_field_list)
self.sorted_field_names = [f.name for f in self.sorted_fields]
def add_field(self, field_name, field, set_attribute=True):
if field_name in self.fields:
self.remove_field(field_name)
elif field_name in self.manytomany:
self.remove_manytomany(self.manytomany[field_name])
if not isinstance(field, MetaField):
del self.table
field.bind(self.model, field_name, set_attribute)
self.fields[field.name] = field
self.columns[field.column_name] = field
self.combined[field.name] = field
self.combined[field.column_name] = field
self._sorted_field_list.insert(field)
self._update_sorted_fields()
if field.default is not None:
# This optimization helps speed up model instance construction.
self.defaults[field] = field.default
if callable(field.default):
self._default_callables[field] = field.default
self._default_callable_list.append((field.name,
field.default))
else:
self._default_dict[field] = field.default
self._default_by_name[field.name] = field.default
else:
field.bind(self.model, field_name, set_attribute)
if isinstance(field, ForeignKeyField):
self.add_ref(field)
elif isinstance(field, ManyToManyField):
self.add_manytomany(field)
def remove_field(self, field_name):
if field_name not in self.fields:
return
del self.table
original = self.fields.pop(field_name)
del self.columns[original.column_name]
del self.combined[field_name]
try:
del self.combined[original.column_name]
except KeyError:
pass
self._sorted_field_list.remove(original)
self._update_sorted_fields()
if original.default is not None:
del self.defaults[original]
if self._default_callables.pop(original, None):
for i, (name, _) in enumerate(self._default_callable_list):
if name == field_name:
self._default_callable_list.pop(i)
break
else:
self._default_dict.pop(original, None)
self._default_by_name.pop(original.name, None)
if isinstance(original, ForeignKeyField):
self.remove_ref(original)
def set_primary_key(self, name, field):
self.composite_key = isinstance(field, CompositeKey)
self.add_field(name, field)
self.primary_key = field
self.auto_increment = (
field.auto_increment or
bool(field.sequence))
def get_primary_keys(self):
if self.composite_key:
return tuple([self.fields[field_name]
for field_name in self.primary_key.field_names])
else:
return (self.primary_key,) if self.primary_key is not False else ()
def get_default_dict(self):
dd = self._default_by_name.copy()
for field_name, default in self._default_callable_list:
dd[field_name] = default()
return dd
def fields_to_index(self):
indexes = []
for f in self.sorted_fields:
if f.primary_key:
continue
if f.index or f.unique or isinstance(f, ForeignKeyField):
indexes.append(ModelIndex(self.model, (f,), unique=f.unique))
for index_obj in self.indexes:
if isinstance(index_obj, Node):
indexes.append(index_obj)
elif isinstance(index_obj, (list, tuple)):
index_parts, unique = index_obj
fields = []
for part in index_parts:
if isinstance(part, basestring):
fields.append(self.combined[part])
elif isinstance(part, Node):
fields.append(part)
else:
raise ValueError('Expected either a field name or a '
'subclass of Node. Got: %s' % part)
indexes.append(ModelIndex(self.model, fields, unique=unique))
return indexes
def set_database(self, database):
self.database = database
self.model._schema._database = database
class SubclassAwareMetadata(Metadata):
models = []
def __init__(self, model, *args, **kwargs):
super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs)
self.models.append(model)
def map_models(self, fn):
for model in self.models:
fn(model)
class DoesNotExist(Exception): pass
class ModelBase(type):
inheritable = set(['constraints', 'database', 'indexes', 'primary_key',
'options', 'schema', 'table_function',
'only_save_dirty'])
def __new__(cls, name, bases, attrs):
if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
pk = getattr(meta, 'primary_key', None)
pk_name = parent_pk = None
# Inherit any field descriptors by deep copying the underlying field
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them.
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = b._meta
if parent_pk is None:
parent_pk = deepcopy(base_meta.primary_key)
all_inheritable = cls.inheritable | base_meta._additional_keys
for k in base_meta.__dict__:
if k in all_inheritable and k not in meta_options:
meta_options[k] = base_meta.__dict__[k]
meta_options.setdefault('schema', base_meta.schema)
for (k, v) in b.__dict__.items():
if k in attrs: continue
if isinstance(v, FieldAccessor) and not v.field.primary_key:
attrs[k] = deepcopy(v.field)
sopts = meta_options.pop('schema_options', None) or {}
Meta = meta_options.get('model_metadata_class', Metadata)
Schema = meta_options.get('schema_manager_class', SchemaManager)
# Construct the new class.
cls = super(ModelBase, cls).__new__(cls, name, bases, attrs)
cls.__data__ = cls.__rel__ = None
cls._meta = Meta(cls, **meta_options)
cls._schema = Schema(cls, **sopts)
fields = []
for key, value in cls.__dict__.items():
if isinstance(value, Field):
if value.primary_key and pk:
raise ValueError('over-determined primary key %s.' % name)
elif value.primary_key:
pk, pk_name = value, key
else:
fields.append((key, value))
if pk is None:
if parent_pk is not False:
pk, pk_name = ((parent_pk, parent_pk.name)
if parent_pk is not None else
(AutoField(), 'id'))
else:
pk = False
elif isinstance(pk, CompositeKey):
pk_name = '__composite_key__'
cls._meta.composite_key = True
if pk is not False:
cls._meta.set_primary_key(pk_name, pk)
for name, field in fields:
cls._meta.add_field(name, field)
# Create a repr and error class before finalizing.
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
exc_name = '%sDoesNotExist' % cls.__name__
exc_attrs = {'__module__': cls.__module__}
exception_class = type(exc_name, (DoesNotExist,), exc_attrs)
cls.DoesNotExist = exception_class
# Call validation hook, allowing additional model validation.
cls.validate_model()
DeferredForeignKey.resolve(cls)
return cls
def __iter__(self):
return iter(self.select())
def __getitem__(self, key):
return self.get_by_id(key)
def __setitem__(self, key, value):
self.set_by_id(key, value)
def __delitem__(self, key):
self.delete_by_id(key)
def __contains__(self, key):
try:
self.get_by_id(key)
except self.DoesNotExist:
return False
else:
return True
def __len__(self):
return self.select().count()
def __bool__(self): return True
__nonzero__ = __bool__ # Python 2.
class _BoundModelsContext(_callable_context_manager):
def __init__(self, models, database, bind_refs, bind_backrefs):
self.models = models
self.database = database
self.bind_refs = bind_refs
self.bind_backrefs = bind_backrefs
def __enter__(self):
self._orig_database = []
for model in self.models:
self._orig_database.append(model._meta.database)
model.bind(self.database, self.bind_refs, self.bind_backrefs)
return self.models
def __exit__(self, exc_type, exc_val, exc_tb):
for model, db in zip(self.models, self._orig_database):
model.bind(db, self.bind_refs, self.bind_backrefs)
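# Illustrative sketch (comments only): _BoundModelsContext backs
# Model.bind_ctx(), which is convenient for temporarily pointing models at a
# throwaway database (e.g. in tests). Names are hypothetical.
#
#     test_db = SqliteDatabase(':memory:')
#     with User.bind_ctx(test_db):
#         User.create_table()
#         User.create(username='alice')
#     # On exit, the previous database binding is restored for each model.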
class Model(with_metaclass(ModelBase, Node)):
def __init__(self, *args, **kwargs):
if kwargs.pop('__no_default__', None):
self.__data__ = {}
else:
self.__data__ = self._meta.get_default_dict()
self._dirty = set(self.__data__)
self.__rel__ = {}
for k in kwargs:
setattr(self, k, kwargs[k])
@classmethod
def validate_model(cls):
pass
@classmethod
def alias(cls, alias=None):
return ModelAlias(cls, alias)
@classmethod
def select(cls, *fields):
is_default = not fields
if not fields:
fields = cls._meta.sorted_fields
return ModelSelect(cls, fields, is_default=is_default)
@classmethod
def _normalize_data(cls, data, kwargs):
normalized = {}
if data:
if not isinstance(data, dict):
if kwargs:
raise ValueError('Data cannot be mixed with keyword '
'arguments: %s' % data)
return data
for key in data:
try:
field = (key if isinstance(key, Field)
else cls._meta.combined[key])
except KeyError:
raise ValueError('Unrecognized field name: "%s" in %s.' %
(key, data))
normalized[field] = data[key]
if kwargs:
for key in kwargs:
try:
normalized[cls._meta.combined[key]] = kwargs[key]
except KeyError:
normalized[getattr(cls, key)] = kwargs[key]
return normalized
@classmethod
def update(cls, __data=None, **update):
return ModelUpdate(cls, cls._normalize_data(__data, update))
@classmethod
def insert(cls, __data=None, **insert):
return ModelInsert(cls, cls._normalize_data(__data, insert))
@classmethod
def insert_many(cls, rows, fields=None, batch_size=None):
return ModelInsert(cls, insert=rows, columns=fields)
@classmethod
def insert_from(cls, query, fields):
columns = [getattr(cls, field) if isinstance(field, basestring)
else field for field in fields]
return ModelInsert(cls, insert=query, columns=columns)
@classmethod
def replace(cls, __data=None, **insert):
return cls.insert(__data, **insert).on_conflict('REPLACE')
@classmethod
def replace_many(cls, rows, fields=None):
return (cls
.insert_many(rows=rows, fields=fields)
.on_conflict('REPLACE'))
@classmethod
def raw(cls, sql, *params):
return ModelRaw(cls, sql, params)
@classmethod
def delete(cls):
return ModelDelete(cls)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def noop(cls):
return NoopModelSelect(cls, ())
@classmethod
def get(cls, *query, **filters):
sq = cls.select()
if query:
sq = sq.where(*query)
if filters:
sq = sq.filter(**filters)
return sq.get()
@classmethod
def get_or_none(cls, *query, **filters):
try:
return cls.get(*query, **filters)
except DoesNotExist:
pass
@classmethod
def get_by_id(cls, pk):
return cls.get(cls._meta.primary_key == pk)
@classmethod
def set_by_id(cls, key, value):
if key is None:
return cls.insert(value).execute()
else:
return (cls.update(value)
.where(cls._meta.primary_key == key).execute())
@classmethod
def delete_by_id(cls, pk):
return cls.delete().where(cls._meta.primary_key == pk).execute()
@classmethod
def get_or_create(cls, **kwargs):
defaults = kwargs.pop('defaults', {})
query = cls.select()
for field, value in kwargs.items():
query = query.where(getattr(cls, field) == value)
try:
return query.get(), False
except cls.DoesNotExist:
try:
if defaults:
kwargs.update(defaults)
with cls._meta.database.atomic():
return cls.create(**kwargs), True
except IntegrityError as exc:
try:
return query.get(), False
except cls.DoesNotExist:
raise exc
@classmethod
def filter(cls, *dq_nodes, **filters):
return cls.select().filter(*dq_nodes, **filters)
def get_id(self):
return getattr(self, self._meta.primary_key.name)
_pk = property(get_id)
@_pk.setter
def _pk(self, value):
setattr(self, self._meta.primary_key.name, value)
def _pk_expr(self):
return self._meta.primary_key == self._pk
def _prune_fields(self, field_dict, only):
new_data = {}
for field in only:
if isinstance(field, basestring):
field = self._meta.combined[field]
if field.name in field_dict:
new_data[field.name] = field_dict[field.name]
return new_data
def _populate_unsaved_relations(self, field_dict):
for foreign_key_field in self._meta.refs:
foreign_key = foreign_key_field.name
conditions = (
foreign_key in field_dict and
field_dict[foreign_key] is None and
self.__rel__.get(foreign_key) is not None)
if conditions:
setattr(self, foreign_key, getattr(self, foreign_key))
field_dict[foreign_key] = self.__data__[foreign_key]
def save(self, force_insert=False, only=None):
field_dict = self.__data__.copy()
if self._meta.primary_key is not False:
pk_field = self._meta.primary_key
pk_value = self._pk
else:
pk_field = pk_value = None
if only:
field_dict = self._prune_fields(field_dict, only)
elif self._meta.only_save_dirty and not force_insert:
field_dict = self._prune_fields(field_dict, self.dirty_fields)
if not field_dict:
self._dirty.clear()
return False
self._populate_unsaved_relations(field_dict)
if pk_value is not None and not force_insert:
if self._meta.composite_key:
for pk_part_name in pk_field.field_names:
field_dict.pop(pk_part_name, None)
else:
field_dict.pop(pk_field.name, None)
rows = self.update(**field_dict).where(self._pk_expr()).execute()
elif pk_field is None or not self._meta.auto_increment:
self.insert(**field_dict).execute()
rows = 1
else:
pk_from_cursor = self.insert(**field_dict).execute()
if pk_from_cursor is not None:
pk_value = pk_from_cursor
self._pk = pk_value
rows = 1
self._dirty.clear()
return rows
def is_dirty(self):
return bool(self._dirty)
@property
def dirty_fields(self):
return [f for f in self._meta.sorted_fields if f.name in self._dirty]
def dependencies(self, search_nullable=False):
model_class = type(self)
query = self.select(self._meta.primary_key).where(self._pk_expr())
stack = [(type(self), query)]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for fk, rel_model in klass._meta.backrefs.items():
if rel_model is model_class:
node = (fk == self.__data__[fk.rel_field.name])
else:
node = fk << query
subquery = (rel_model.select(rel_model._meta.primary_key)
.where(node))
if not fk.null or search_nullable:
stack.append((rel_model, subquery))
yield (node, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
dependencies = self.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model
if fk.null and not delete_nullable:
model.update(**{fk.name: None}).where(query).execute()
else:
model.delete().where(query).execute()
return self.delete().where(self._pk_expr()).execute()
def __hash__(self):
return hash((self.__class__, self._pk))
def __eq__(self, other):
return (
other.__class__ == self.__class__ and
self._pk is not None and
other._pk == self._pk)
def __ne__(self, other):
return not self == other
def __sql__(self, ctx):
return ctx.sql(getattr(self, self._meta.primary_key.name))
@classmethod
def bind(cls, database, bind_refs=True, bind_backrefs=True):
is_different = cls._meta.database is not database
cls._meta.set_database(database)
if bind_refs or bind_backrefs:
G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs)
for _, model, is_backref in G:
model._meta.set_database(database)
return is_different
@classmethod
def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True):
return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs)
@classmethod
def table_exists(cls):
meta = cls._meta
return meta.database.table_exists(meta.table, meta.schema)
@classmethod
def create_table(cls, safe=True, **options):
if 'fail_silently' in options:
__deprecated__('"fail_silently" has been deprecated in favor of '
'"safe" for the create_table() method.')
safe = options.pop('fail_silently')
if safe and not cls._meta.database.safe_create_index \
and cls.table_exists():
return
cls._schema.create_all(safe, **options)
@classmethod
def drop_table(cls, safe=True, **options):
if safe and not cls._meta.database.safe_drop_index \
and not cls.table_exists():
return
cls._schema.drop_all(safe, **options)
@classmethod
def index(cls, *fields, **kwargs):
return ModelIndex(cls, fields, **kwargs)
@classmethod
def add_index(cls, *fields, **kwargs):
if len(fields) == 1 and isinstance(fields[0], (SQL, Index)):
cls._meta.indexes.append(fields[0])
else:
cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs))
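# Illustrative sketch (comments only): basic CRUD against the Model API defined
# above. The model, field types and database class are hypothetical stand-ins
# for helpers defined earlier in this module.
#
#     class User(Model):
#         username = TextField(unique=True)
#         class Meta:
#             database = SqliteDatabase(':memory:')
#
#     User.create_table()
#     u = User.create(username='alice')        # INSERT, then populate the pk
#     u.username = 'alicia'
#     u.save()                                 # UPDATE ... WHERE pk
#     User.get(User.username == 'alicia')      # SELECT ... LIMIT 1 or raise
#     User.get_or_create(username='bob')       # -> (instance, created_boolean)
#     u.delete_instance()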
class ModelAlias(Node):
"""Provide a separate reference to a model in a query."""
def __init__(self, model, alias=None):
self.__dict__['model'] = model
self.__dict__['alias'] = alias
def __getattr__(self, attr):
model_attr = getattr(self.model, attr)
if isinstance(model_attr, Field):
self.__dict__[attr] = FieldAlias.create(self, model_attr)
return self.__dict__[attr]
return model_attr
def __setattr__(self, attr, value):
raise AttributeError('Cannot set attributes on model aliases.')
def get_field_aliases(self):
return [getattr(self, n) for n in self.model._meta.sorted_field_names]
def select(self, *selection):
if not selection:
selection = self.get_field_aliases()
return ModelSelect(self, selection)
def __call__(self, **kwargs):
return self.model(**kwargs)
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
# Return the quoted table name.
return ctx.sql(self.model)
if self.alias:
ctx.alias_manager[self] = self.alias
if ctx.scope == SCOPE_SOURCE:
# Define the table and its alias.
return (ctx
.sql(self.model._meta.entity)
.literal(' AS ')
.sql(Entity(ctx.alias_manager[self])))
else:
# Refer to the table using the alias.
return ctx.sql(Entity(ctx.alias_manager[self]))
class FieldAlias(Field):
def __init__(self, source, field):
self.source = source
self.model = source.model
self.field = field
@classmethod
def create(cls, source, field):
class _FieldAlias(cls, type(field)):
pass
return _FieldAlias(source, field)
def clone(self):
return FieldAlias(self.source, self.field)
def coerce(self, value): return self.field.coerce(value)
def python_value(self, value): return self.field.python_value(value)
def db_value(self, value): return self.field.db_value(value)
def __getattr__(self, attr):
return self.source if attr == 'model' else getattr(self.field, attr)
def __sql__(self, ctx):
return ctx.sql(Column(self.source, self.field.column_name))
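# Illustrative sketch (comments only): ModelAlias/FieldAlias let the same model
# appear more than once in a query, e.g. a self-join over a hypothetical
# Category model that has a self-referential foreign key named "parent".
#
#     Parent = Category.alias()
#     query = (Category
#              .select(Category, Parent)
#              .join(Parent, on=(Category.parent == Parent.id)))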
def sort_models(models):
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for rel_model in model._meta.refs.values():
dfs(rel_model)
if model._meta.depends_on:
for dependency in model._meta.depends_on:
dfs(dependency)
ordering.append(model)
names = lambda m: (m._meta.name, m._meta.table_name)
for m in sorted(models, key=names):
dfs(m)
return ordering
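# Illustrative sketch (comments only): sort_models() orders models so that
# referenced tables come before their dependents, which is what bulk table
# creation/removal helpers rely on. Model names are hypothetical.
#
#     sort_models([Tweet, User])   # -> [User, Tweet] when Tweet has a FK to User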
class _ModelQueryHelper(object):
default_row_type = ROW.MODEL
def __init__(self, *args, **kwargs):
super(_ModelQueryHelper, self).__init__(*args, **kwargs)
if not self._database:
self._database = self.model._meta.database
def _get_cursor_wrapper(self, cursor):
row_type = self._row_type or self.default_row_type
if row_type == ROW.MODEL:
return self._get_model_cursor_wrapper(cursor)
elif row_type == ROW.DICT:
return ModelDictCursorWrapper(cursor, self.model, self._returning)
elif row_type == ROW.TUPLE:
return ModelTupleCursorWrapper(cursor, self.model, self._returning)
elif row_type == ROW.NAMED_TUPLE:
return ModelNamedTupleCursorWrapper(cursor, self.model,
self._returning)
elif row_type == ROW.CONSTRUCTOR:
return ModelObjectCursorWrapper(cursor, self.model,
self._returning, self._constructor)
else:
raise ValueError('Unrecognized row type: "%s".' % row_type)
def _get_model_cursor_wrapper(self, cursor):
return ModelObjectCursorWrapper(cursor, self.model, [], self.model)
class ModelRaw(_ModelQueryHelper, RawQuery):
def __init__(self, model, sql, params, **kwargs):
self.model = model
self._returning = ()
super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs)
def get(self):
try:
return self.execute()[0]
except IndexError:
sql, params = self.sql()
raise self.model.DoesNotExist('%s instance matching query does '
'not exist:\nSQL: %s\nParams: %s' %
(self.model, sql, params))
class BaseModelSelect(_ModelQueryHelper):
def __add__(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs)
def __or__(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs)
def __and__(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs)
def __sub__(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs)
def __iter__(self):
if not self._cursor_wrapper:
self.execute()
return iter(self._cursor_wrapper)
def prefetch(self, *subqueries):
return prefetch(self, *subqueries)
def get(self):
clone = self.paginate(1, 1)
clone._cursor_wrapper = None
try:
return clone.execute()[0]
except IndexError:
sql, params = clone.sql()
raise self.model.DoesNotExist('%s instance matching query does '
'not exist:\nSQL: %s\nParams: %s' %
(clone.model, sql, params))
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if is_model(column):
grouping.extend(column._meta.sorted_fields)
elif isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery):
def __init__(self, model, *args, **kwargs):
self.model = model
super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs)
class ModelSelect(BaseModelSelect, Select):
def __init__(self, model, fields_or_models, is_default=False):
self.model = self._join_ctx = model
self._joins = {}
self._is_default = is_default
fields = []
for fm in fields_or_models:
if is_model(fm):
fields.extend(fm._meta.sorted_fields)
elif isinstance(fm, ModelAlias):
fields.extend(fm.get_field_aliases())
elif isinstance(fm, Table) and fm._columns:
fields.extend([getattr(fm, col) for col in fm._columns])
else:
fields.append(fm)
super(ModelSelect, self).__init__([model], fields)
def select(self, *fields):
if fields or not self._is_default:
return super(ModelSelect, self).select(*fields)
return self
def switch(self, ctx=None):
self._join_ctx = ctx
return self
@Node.copy
def objects(self, constructor=None):
self._row_type = ROW.CONSTRUCTOR
self._constructor = self.model if constructor is None else constructor
def _get_model(self, src):
if is_model(src):
return src, True
elif isinstance(src, Table) and src._model:
return src._model, False
elif isinstance(src, ModelAlias):
return src.model, False
elif isinstance(src, ModelSelect):
return src.model, False
return None, False
def _normalize_join(self, src, dest, on, attr):
# Allow "on" expression to have an alias that determines the
# destination attribute for the joined data.
on_alias = isinstance(on, Alias)
if on_alias:
attr = on._alias
on = on.alias()
src_model, src_is_model = self._get_model(src)
dest_model, dest_is_model = self._get_model(dest)
if src_model and dest_model:
self._join_ctx = dest
constructor = dest_model
if not (src_is_model and dest_is_model) and isinstance(on, Column):
if on.source is src:
to_field = src_model._meta.columns[on.name]
elif on.source is dest:
to_field = dest_model._meta.columns[on.name]
else:
raise AttributeError('"on" clause Column %s does not '
'belong to %s or %s.' %
(on, src_model, dest_model))
on = None
elif isinstance(on, Field):
to_field = on
on = None
else:
to_field = None
fk_field, is_backref = self._generate_on_clause(
src_model, dest_model, to_field, on)
if on is None:
src_attr = 'name' if src_is_model else 'column_name'
dest_attr = 'name' if dest_is_model else 'column_name'
if is_backref:
lhs = getattr(dest, getattr(fk_field, dest_attr))
rhs = getattr(src, getattr(fk_field.rel_field, src_attr))
else:
lhs = getattr(src, getattr(fk_field, src_attr))
rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr))
on = (lhs == rhs)
if not attr:
if fk_field is not None and not is_backref:
attr = fk_field.name
else:
attr = dest_model._meta.name
elif isinstance(dest, Source):
constructor = dict
attr = attr or dest._alias
if not attr and isinstance(dest, Table):
attr = attr or dest.__name__
return (on, attr, constructor)
@Node.copy
def join(self, dest, join_type='INNER', on=None, src=None, attr=None):
src = self._join_ctx if src is None else src
on, attr, constructor = self._normalize_join(src, dest, on, attr)
if attr:
self._joins.setdefault(src, [])
self._joins[src].append((dest, attr, constructor))
return super(ModelSelect, self).join(dest, join_type, on)
@Node.copy
def join_from(self, src, dest, join_type='INNER', on=None, attr=None):
return self.join(dest, join_type, on, src, attr)
def _generate_on_clause(self, src, dest, to_field=None, on=None):
meta = src._meta
backref = fk_fields = False
if dest in meta.model_refs:
fk_fields = meta.model_refs[dest]
elif dest in meta.model_backrefs:
fk_fields = meta.model_backrefs[dest]
backref = True
if not fk_fields:
if on is not None:
return None, False
raise ValueError('Unable to find foreign key between %s and %s. '
'Please specify an explicit join condition.' %
(src, dest))
if to_field is not None:
target = (to_field.field if isinstance(to_field, FieldAlias)
else to_field)
fk_fields = [f for f in fk_fields if (
(f is target) or
(backref and f.rel_field is to_field))]
if len(fk_fields) > 1:
if on is None:
raise ValueError('More than one foreign key between %s and %s.'
' Please specify which you are joining on.' %
(src, dest))
return None, False
else:
fk_field = fk_fields[0]
return fk_field, backref
def _get_model_cursor_wrapper(self, cursor):
if len(self._from_list) == 1 and not self._joins:
return ModelObjectCursorWrapper(cursor, self.model,
self._returning, self.model)
return ModelCursorWrapper(cursor, self.model, self._returning,
self._from_list, self._joins)
def ensure_join(self, lm, rm, on=None, **join_kwargs):
join_ctx = self._join_ctx
for dest, attr, constructor in self._joins.get(lm, []):
if dest == rm:
return self
return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx)
def convert_dict_to_node(self, qdict):
accum = []
joins = []
fks = (ForeignKeyField, BackrefAccessor)
for key, value in sorted(qdict.items()):
curr = self.model
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
elif value is None:
op = OP.IS
else:
op = OP.EQ
if '__' not in key:
# Handle simplest case. This avoids joining over-eagerly when a
# direct FK lookup is all that is required.
model_attr = getattr(curr, key)
else:
for piece in key.split('__'):
for dest, attr, _ in self._joins.get(curr, ()):
if attr == piece or (isinstance(dest, ModelAlias) and
dest.alias == piece):
curr = dest
break
else:
model_attr = getattr(curr, piece)
if value is not None and isinstance(model_attr, fks):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expression(model_attr, op, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = ColumnBase()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expression, lhs = Node(), rhs = ...
q = collections.deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expression):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
expression = reduce(operator.and_, query)
# Apply values from the DQ object.
if piece._negated:
expression = Negated(expression)
#expression._alias = piece._alias
setattr(curr, side, expression)
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model, field.rel_model
field_obj = field
elif isinstance(field, BackrefAccessor):
lm, rm = field.model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
def __sql_selection__(self, ctx, is_subquery=False):
if self._is_default and is_subquery and len(self._returning) > 1 and \
self.model._meta.primary_key is not False:
return ctx.sql(self.model._meta.primary_key)
return ctx.sql(CommaNodeList(self._returning))
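# Illustrative sketch (comments only): ModelSelect infers join conditions from
# declared foreign keys and also accepts Django-style lookups through filter()
# (see convert_dict_to_node above). Model names are hypothetical.
#
#     query = (Tweet
#              .select(Tweet, User)
#              .join(User)                             # ON inferred from the FK
#              .where(User.username == 'alice'))
#
#     # Equivalent filter() form, joining implicitly through the "user" FK:
#     Tweet.filter(user__username='alice')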
class NoopModelSelect(ModelSelect):
def __sql__(self, ctx):
return self.model._meta.database.get_noop_select(ctx)
def _get_cursor_wrapper(self, cursor):
return CursorWrapper(cursor)
class _ModelWriteQueryHelper(_ModelQueryHelper):
def __init__(self, model, *args, **kwargs):
self.model = model
super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs)
def _set_table_alias(self, ctx):
table = self.model._meta.table
ctx.alias_manager[table] = table.__name__
class ModelUpdate(_ModelWriteQueryHelper, Update):
pass
class ModelInsert(_ModelWriteQueryHelper, Insert):
def __init__(self, *args, **kwargs):
super(ModelInsert, self).__init__(*args, **kwargs)
if self._returning is None and self.model._meta.database is not None:
if self.model._meta.database.returning_clause:
self._returning = self.model._meta.get_primary_keys()
self._row_type = ROW.TUPLE
def get_default_data(self):
return self.model._meta.defaults
class ModelDelete(_ModelWriteQueryHelper, Delete):
pass
class ManyToManyQuery(ModelSelect):
def __init__(self, instance, accessor, rel, *args, **kwargs):
self._instance = instance
self._accessor = accessor
super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs)
def _id_list(self, model_or_id_list):
if isinstance(model_or_id_list[0], Model):
return [obj._pk for obj in model_or_id_list]
return model_or_id_list
def add(self, value, clear_existing=False):
if clear_existing:
self.clear()
accessor = self._accessor
if isinstance(value, SelectQuery):
query = value.columns(
SQL(str(self._instance._pk)),
accessor.rel_model._meta.primary_key)
accessor.through_model.insert_from(
fields=[accessor.src_fk, accessor.dest_fk],
query=query).execute()
else:
value = ensure_tuple(value)
if not value:
return
inserts = [{
accessor.src_fk.name: self._instance._pk,
accessor.dest_fk.name: rel_id}
for rel_id in self._id_list(value)]
accessor.through_model.insert_many(inserts).execute()
def remove(self, value):
if isinstance(value, SelectQuery):
subquery = value.columns(value.model._meta.primary_key)
return (self._accessor.through_model
.delete()
.where(
(self._accessor.dest_fk << subquery) &
(self._accessor.src_fk == self._instance._pk))
.execute())
else:
value = ensure_tuple(value)
if not value:
return
return (self._accessor.through_model
.delete()
.where(
(self._accessor.dest_fk << self._id_list(value)) &
(self._accessor.src_fk == self._instance._pk))
.execute())
def clear(self):
return (self._accessor.through_model
.delete()
.where(self._accessor.src_fk == self._instance)
.execute())
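# Illustrative sketch (comments only): for a bound instance the accessor returns
# a ManyToManyQuery, so membership is managed with add()/remove()/clear(),
# which write to the through model. Instance/model names are hypothetical.
#
#     course.students                # -> ManyToManyQuery over Student rows
#     course.students.add(huey)
#     course.students.add([mickey, zaizee])
#     course.students.remove(Student.select().where(Student.name == 'mickey'))
#     course.students.clear()
#     course.students = [huey]       # __set__: clear existing, then add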
class BaseModelCursorWrapper(DictCursorWrapper):
def __init__(self, cursor, model, columns):
super(BaseModelCursorWrapper, self).__init__(cursor)
self.model = model
self.select = columns or []
def _initialize_columns(self):
combined = self.model._meta.combined
table = self.model._meta.table
description = self.cursor.description
self.ncols = len(self.cursor.description)
self.columns = []
self.converters = converters = [None] * self.ncols
self.fields = fields = [None] * self.ncols
for idx, description_item in enumerate(description):
column = description_item[0]
dot_index = column.find('.')
if dot_index != -1:
column = column[dot_index + 1:]
self.columns.append(column)
try:
node = self.select[idx]
except IndexError:
continue
else:
node = node.unwrap()
# Heuristics used to attempt to get the field associated with a
# given SELECT column, so that we can accurately convert the value
# returned by the database-cursor into a Python object.
if isinstance(node, Field):
converters[idx] = node.python_value
fields[idx] = node
if column == node.name or column == node.column_name:
self.columns[idx] = node.name
elif column in combined:
if not (isinstance(node, Function) and not node._coerce):
# Unlikely, but if a function was aliased to a column,
# don't use that column's converter if coerce is False.
converters[idx] = combined[column].python_value
if isinstance(node, Column) and node.source == table:
fields[idx] = combined[column]
elif (isinstance(node, Function) and node.arguments and
node._coerce):
# Try to special-case functions calling fields.
first = node.arguments[0]
if isinstance(first, Node):
first = first.unwrap()
if isinstance(first, Field):
converters[idx] = first.python_value
elif isinstance(first, Entity):
path = first._path[-1]
field = combined.get(path)
if field is not None:
converters[idx] = field.python_value
initialize = _initialize_columns
def process_row(self, row):
raise NotImplementedError
class ModelDictCursorWrapper(BaseModelCursorWrapper):
def process_row(self, row):
result = {}
columns, converters = self.columns, self.converters
fields = self.fields
for i in range(self.ncols):
attr = columns[i]
if converters[i] is not None:
result[attr] = converters[i](row[i])
else:
result[attr] = row[i]
return result
class ModelTupleCursorWrapper(ModelDictCursorWrapper):
constructor = tuple
def process_row(self, row):
columns, converters = self.columns, self.converters
return self.constructor([
(converters[i](row[i]) if converters[i] is not None else row[i])
for i in range(self.ncols)])
class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper):
def initialize(self):
self._initialize_columns()
attributes = []
for i in range(self.ncols):
attributes.append(self.columns[i])
self.tuple_class = collections.namedtuple('Row', attributes)
self.constructor = lambda row: self.tuple_class(*row)
class ModelObjectCursorWrapper(ModelDictCursorWrapper):
def __init__(self, cursor, model, select, constructor):
self.constructor = constructor
self.is_model = is_model(constructor)
super(ModelObjectCursorWrapper, self).__init__(cursor, model, select)
def process_row(self, row):
data = super(ModelObjectCursorWrapper, self).process_row(row)
if self.is_model:
# Clear out any dirty fields before returning to the user.
obj = self.constructor(__no_default__=1, **data)
obj._dirty.clear()
return obj
else:
return self.constructor(**data)
class ModelCursorWrapper(BaseModelCursorWrapper):
def __init__(self, cursor, model, select, from_list, joins):
super(ModelCursorWrapper, self).__init__(cursor, model, select)
self.from_list = from_list
self.joins = joins
def initialize(self):
self._initialize_columns()
selected_src = set([field.model for field in self.fields
if field is not None])
select, columns = self.select, self.columns
self.key_to_constructor = {self.model: self.model}
self.src_is_dest = {}
self.src_to_dest = []
accum = collections.deque(self.from_list)
dests = set()
while accum:
curr = accum.popleft()
if isinstance(curr, Join):
accum.append(curr.lhs)
accum.append(curr.rhs)
continue
if curr not in self.joins:
continue
for key, attr, constructor in self.joins[curr]:
if key not in self.key_to_constructor:
self.key_to_constructor[key] = constructor
self.src_to_dest.append((curr, attr, key,
isinstance(curr, dict)))
dests.add(key)
accum.append(key)
for src, _, dest, _ in self.src_to_dest:
self.src_is_dest[src] = src in dests and (dest in selected_src
or src in selected_src)
self.column_keys = []
for idx, node in enumerate(select):
key = self.model
field = self.fields[idx]
if field is not None:
if isinstance(field, FieldAlias):
key = field.source
else:
key = field.model
else:
if isinstance(node, Node):
node = node.unwrap()
if isinstance(node, Column):
key = node.source
self.column_keys.append(key)
def process_row(self, row):
objects = {}
object_list = []
for key, constructor in self.key_to_constructor.items():
objects[key] = constructor(__no_default__=True)
object_list.append(objects[key])
set_keys = set()
for idx, key in enumerate(self.column_keys):
instance = objects[key]
if self.fields[idx] is not None:
column = self.fields[idx].name
else:
column = self.columns[idx]
value = row[idx]
if value is not None:
set_keys.add(key)
if self.converters[idx]:
value = self.converters[idx](value)
if isinstance(instance, dict):
instance[column] = value
else:
setattr(instance, column, value)
# Need to do some analysis on the joins before this.
for (src, attr, dest, is_dict) in self.src_to_dest:
instance = objects[src]
try:
joined_instance = objects[dest]
except KeyError:
continue
# If no fields were set on the destination instance then do not
# assign an "empty" instance.
if instance is None or dest is None or \
(dest not in set_keys and not self.src_is_dest.get(dest)):
continue
if is_dict:
instance[attr] = joined_instance
else:
setattr(instance, attr, joined_instance)
# When instantiating models from a cursor, we clear the dirty fields.
for instance in object_list:
if isinstance(instance, Model):
instance._dirty.clear()
return objects[self.model]
class PrefetchQuery(collections.namedtuple('_PrefetchQuery', (
'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))):
def __new__(cls, query, fields=None, is_backref=None, rel_models=None,
field_to_name=None, model=None):
if fields:
if is_backref:
rel_models = [field.model for field in fields]
foreign_key_attrs = [field.rel_field.name for field in fields]
else:
rel_models = [field.rel_model for field in fields]
foreign_key_attrs = [field.name for field in fields]
field_to_name = list(zip(fields, foreign_key_attrs))
model = query.model
return super(PrefetchQuery, cls).__new__(
cls, query, fields, is_backref, rel_models, field_to_name, model)
def populate_instance(self, instance, id_map):
if self.is_backref:
for field in self.fields:
identifier = instance.__data__[field.name]
key = (field, identifier)
if key in id_map:
setattr(instance, field.name, id_map[key])
else:
for field, attname in self.field_to_name:
identifier = instance.__data__[field.rel_field.name]
key = (field, identifier)
rel_instances = id_map.get(key, [])
for inst in rel_instances:
setattr(inst, attname, instance)
setattr(instance, field.backref, rel_instances)
def store_instance(self, instance, id_map):
for field, attname in self.field_to_name:
identity = field.rel_field.python_value(instance.__data__[attname])
key = (field, identity)
if self.is_backref:
id_map[key] = instance
else:
id_map.setdefault(key, [])
id_map[key].append(instance)
def prefetch_add_subquery(sq, subqueries):
fixed_queries = [PrefetchQuery(sq)]
for i, subquery in enumerate(subqueries):
if isinstance(subquery, tuple):
subquery, target_model = subquery
else:
target_model = None
if not isinstance(subquery, Query) and is_model(subquery) or \
isinstance(subquery, ModelAlias):
subquery = subquery.select()
subquery_model = subquery.model
fks = backrefs = None
for j in reversed(range(i + 1)):
fixed = fixed_queries[j]
last_query = fixed.query
last_model = fixed.model
rels = subquery_model._meta.model_refs.get(last_model, [])
if rels:
fks = [getattr(subquery_model, fk.name) for fk in rels]
pks = [getattr(last_model, fk.rel_field.name) for fk in rels]
else:
backrefs = last_model._meta.model_refs.get(subquery_model, [])
if (fks or backrefs) and ((target_model is last_model) or
(target_model is None)):
break
if not fks and not backrefs:
tgt_err = ' using %s' % target_model if target_model else ''
raise AttributeError('Error: unable to find foreign key for '
'query: %s%s' % (subquery, tgt_err))
if fks:
expr = reduce(operator.or_, [
(fk << last_query.select(pk))
for (fk, pk) in zip(fks, pks)])
subquery = subquery.where(expr)
fixed_queries.append(PrefetchQuery(subquery, fks, False))
elif backrefs:
expr = reduce(operator.or_, [
(backref.rel_field << last_query.select(backref))
for backref in backrefs])
subquery = subquery.where(expr)
fixed_queries.append(PrefetchQuery(subquery, backrefs, True))
return fixed_queries
def prefetch(sq, *subqueries):
if not subqueries:
return sq
fixed_queries = prefetch_add_subquery(sq, subqueries)
deps = {}
rel_map = {}
for pq in reversed(fixed_queries):
query_model = pq.model
if pq.fields:
for rel_model in pq.rel_models:
rel_map.setdefault(rel_model, [])
rel_map[rel_model].append(pq)
deps[query_model] = {}
id_map = deps[query_model]
has_relations = bool(rel_map.get(query_model))
for instance in pq.query:
if pq.fields:
pq.store_instance(instance, id_map)
if has_relations:
for rel in rel_map[query_model]:
rel.populate_instance(instance, deps[rel.model])
return pq.query
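# Illustrative sketch (comments only): prefetch() runs one query per model and
# stitches child rows onto their parents in Python, avoiding a separate backref
# query per row. Model names are hypothetical.
#
#     users = User.select()
#     tweets = Tweet.select()
#     for user in prefetch(users, tweets):
#         # user.tweets is now a plain list, populated without extra queries.
#         for tweet in user.tweets:
#             print(user.username, tweet.content)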
|
py | 7dfbbd5ad1876192396803c7d01c59b014367bc6 | import re
import typing as t
import importlib_resources as ir
from lxml import etree
from lxml.etree import DocumentInvalid
from .utils import log
local_report_def = ir.files("datapane.resources.report_def")
rng_validator = etree.RelaxNG(file=str(local_report_def / "full_schema.rng"))
dp_namespace: str = "https://datapane.com/schemas/report/1/"
# regex to check for a xsd:ID name
re_xml_id = re.compile(r"^[a-zA-Z_][\w.-]*$")
def validate_report_doc(
xml_str: t.Optional[str] = None, xml_doc: t.Optional[etree.Element] = None, quiet: bool = False
) -> bool:
"""Validate the model against the schema, throws an etree.DocumentInvalid if not"""
assert xml_str or xml_doc
if xml_str:
xml_doc = etree.fromstring(xml_str)
try:
rng_validator.assertValid(xml_doc)
return True
except DocumentInvalid:
if not xml_str:
xml_str = etree.tounicode(xml_doc, pretty_print=True)
if not quiet:
log.error(f"Error validating report document:\n\n{xml_str}")
raise
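# Illustrative sketch (comments only): typical callers pass either a raw XML
# string or an already-parsed element. Whether a given document validates is
# determined by the bundled RelaxNG schema, so the snippet below is purely
# hypothetical.
#
#     try:
#         validate_report_doc(xml_str="<Report>...</Report>")
#     except DocumentInvalid:
#         pass  # the offending document was already logged (unless quiet=True)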
|
py | 7dfbbdde545b9f85b143faa32c075b8c9f4fb3f9 | #!/usr/bin/env python
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# vim:set ft=python ts=4 sw=4 sts=4 autoindent:
'''
Per-project configuration functionality for
Brat Rapid Annotation Tool (brat)
Author: Pontus Stenetorp <pontus is s u-tokyo ac jp>
Author: Sampo Pyysalo <smp is s u-tokyo ac jp>
Author: Illes Solt <solt tmit bme hu>
Version: 2011-08-15
'''
import re
import robotparser # TODO reduce scope
import urlparse # TODO reduce scope
import sys
from annotation import open_textfile
from message import Messager
ENTITY_CATEGORY, EVENT_CATEGORY, RELATION_CATEGORY, UNKNOWN_CATEGORY = xrange(4)
class InvalidProjectConfigException(Exception):
pass
# names of files in which various configs are found
__access_control_filename = 'acl.conf'
__annotation_config_filename = 'annotation.conf'
__visual_config_filename = 'visual.conf'
__tools_config_filename = 'tools.conf'
__kb_shortcut_filename = 'kb_shortcuts.conf'
# annotation config section name constants
ENTITY_SECTION = "entities"
RELATION_SECTION = "relations"
EVENT_SECTION = "events"
ATTRIBUTE_SECTION = "attributes"
# aliases for config section names
SECTION_ALIAS = {
"spans" : ENTITY_SECTION,
}
__expected_annotation_sections = (ENTITY_SECTION, RELATION_SECTION, EVENT_SECTION, ATTRIBUTE_SECTION)
__optional_annotation_sections = []
# visual config section name constants
LABEL_SECTION = "labels"
DRAWING_SECTION = "drawing"
__expected_visual_sections = (LABEL_SECTION, DRAWING_SECTION)
__optional_visual_sections = []
# tools config section name constants
OPTIONS_SECTION = "options"
SEARCH_SECTION = "search"
ANNOTATORS_SECTION = "annotators"
DISAMBIGUATORS_SECTION = "disambiguators"
NORMALIZATION_SECTION = "normalization"
__expected_tools_sections = (OPTIONS_SECTION, SEARCH_SECTION, ANNOTATORS_SECTION, DISAMBIGUATORS_SECTION, NORMALIZATION_SECTION)
__optional_tools_sections = (OPTIONS_SECTION, SEARCH_SECTION, ANNOTATORS_SECTION, DISAMBIGUATORS_SECTION, NORMALIZATION_SECTION)
# special relation types for marking which spans can overlap
# ENTITY_NESTING_TYPE used up to version 1.3, now deprecated
ENTITY_NESTING_TYPE = "ENTITY-NESTING"
# TEXTBOUND_OVERLAP_TYPE used from version 1.3 onward
TEXTBOUND_OVERLAP_TYPE = "<OVERLAP>"
SPECIAL_RELATION_TYPES = set([ENTITY_NESTING_TYPE,
TEXTBOUND_OVERLAP_TYPE])
OVERLAP_TYPE_ARG = '<OVL-TYPE>'
# visual config default value names
VISUAL_SPAN_DEFAULT = "SPAN_DEFAULT"
VISUAL_ARC_DEFAULT = "ARC_DEFAULT"
VISUAL_ATTR_DEFAULT = "ATTRIBUTE_DEFAULT"
# visual config attribute name lists
SPAN_DRAWING_ATTRIBUTES = ['fgColor', 'bgColor', 'borderColor']
ARC_DRAWING_ATTRIBUTES = ['color', 'dashArray', 'arrowHead', 'labelArrow']
ATTR_DRAWING_ATTRIBUTES = ['box', 'dashArray', 'glyph', 'position']
# fallback defaults if config files not found
__default_configuration = """
[entities]
Protein
[relations]
Equiv Arg1:Protein, Arg2:Protein, <REL-TYPE>:symmetric-transitive
[events]
Protein_binding|GO:0005515 Theme+:Protein
Gene_expression|GO:0010467 Theme:Protein
[attributes]
Negation Arg:<EVENT>
Speculation Arg:<EVENT>
"""
__default_visual = """
[labels]
Protein | Protein | Pro | P
Protein_binding | Protein binding | Binding | Bind
Gene_expression | Gene expression | Expression | Exp
Theme | Theme | Th
[drawing]
Protein bgColor:#7fa2ff
SPAN_DEFAULT fgColor:black, bgColor:lightgreen, borderColor:black
ARC_DEFAULT color:black
ATTRIBUTE_DEFAULT glyph:*
"""
__default_tools = """
[search]
google <URL>:http://www.google.com/search?q=%s
"""
__default_kb_shortcuts = """
P Protein
"""
__default_access_control = """
User-agent: *
Allow: /
Disallow: /hidden/
User-agent: guest
Disallow: /confidential/
"""
# Reserved strings with special meanings in configuration.
reserved_config_name = ["ANY", "ENTITY", "RELATION", "EVENT", "NONE", "EMPTY", "REL-TYPE", "URL", "URLBASE", "GLYPH-POS", "DEFAULT", "NORM", "OVERLAP", "OVL-TYPE"]
# TODO: "GLYPH-POS" is no longer used, warn if encountered and
# recommend to use "position" instead.
reserved_config_string = ["<%s>" % n for n in reserved_config_name]
# Magic string to use to represent a separator in a config
SEPARATOR_STR = "SEPARATOR"
def normalize_to_storage_form(t):
"""
Given a label, returns a form of the term that can be used for
disk storage. For example, space can be replaced with underscores
to allow use with space-separated formats.
"""
if t not in normalize_to_storage_form.__cache:
        # conservative implementation: replace any space with an
        # underscore, then replace all characters not in
        # [a-zA-Z0-9_<>,-] or the CJK range \u4e00-\u9fa5 with
        # underscores.
import re
import unicodedata
n = t.replace(" ", "_")
        if isinstance(n, unicode):
            # NOTE: the accent-folded ASCII form is computed here but never
            # applied below, so accented and CJK characters are preserved.
            ascii = unicodedata.normalize('NFKD', n).encode('ascii', 'ignore')
        # original ASCII-only normalization:
        # n = re.sub(r'[^a-zA-Z0-9_-]', '_', n)
        n = re.sub(u'[^a-zA-Z\u4e00-\u9fa5<>,0-9_-]', '_', n)
normalize_to_storage_form.__cache[t] = n
return normalize_to_storage_form.__cache[t]
normalize_to_storage_form.__cache = {}
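# Illustrative sketch (not part of the original module): expected behaviour of
# normalize_to_storage_form() on two hypothetical labels. A space becomes an
# underscore, and characters outside the allowed set (such as ':') are also
# replaced with underscores.
def _normalize_to_storage_form_examples():
    return [normalize_to_storage_form("Protein binding"),  # -> "Protein_binding"
            normalize_to_storage_form("GO:0005515")]       # -> "GO_0005515"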
class TypeHierarchyNode:
"""
Represents a node in a simple (possibly flat) hierarchy.
Each node is associated with a set of terms, one of which (the
storage_form) matches the way in which the type denoted by the
    node is referred to in data stored on disk and in client-server
communications. This term is guaranteed to be in "storage form" as
defined by normalize_to_storage_form().
Each node may be associated with one or more "arguments", which
are (multivalued) key:value pairs. These determine various characteristics
of the node, but their interpretation depends on the hierarchy the
node occupies: for example, for events the arguments correspond to
event arguments.
"""
def __init__(self, terms, args=[]):
self.terms, self.args = terms, args
if len(terms) == 0 or len([t for t in terms if t == ""]) != 0:
Messager.debug("Empty term in configuration", duration=-1)
raise InvalidProjectConfigException
# unused if any of the terms marked with "!"
self.unused = False
for i in range(len(self.terms)):
if self.terms[i][0] == "!":
self.terms[i]= self.terms[i][1:]
self.unused = True
self.children = []
# The first of the listed terms is used as the primary term for
# storage (excepting for "special" config-only types). Due to
# format restrictions, this form must not have e.g. space or
# various special characters.
if self.terms[0] not in SPECIAL_RELATION_TYPES:
self.__primary_term = normalize_to_storage_form(self.terms[0])
else:
self.__primary_term = self.terms[0]
# TODO: this might not be the ideal place to put this warning
if self.__primary_term != self.terms[0]:
Messager.warning("Note: in configuration, term '%s' is not appropriate for storage (should match '^[a-zA-Z0-9_-]*$'), using '%s' instead. (Revise configuration file to get rid of this message. Terms other than the first are not subject to this restriction.)" % (self.terms[0], self.__primary_term), -1)
self.terms[0] = self.__primary_term
# TODO: cleaner and more localized parsing
self.arguments = {}
self.special_arguments = {}
self.arg_list = []
self.arg_min_count = {}
self.arg_max_count = {}
self.keys_by_type = {}
for a in self.args:
a = a.strip()
m = re.match(r'^(\S*?):(\S*)$', a)
if not m:
Messager.warning("Project configuration: Failed to parse argument '%s' (args: %s)" % (a, args), 5)
raise InvalidProjectConfigException
key, atypes = m.groups()
# special case (sorry): if the key is a reserved config
# string (e.g. "<REL-TYPE>" or "<URL>"), parse differently
# and store separately
if key in reserved_config_string:
                if key in self.special_arguments:
                    Messager.warning("Project configuration: error parsing: argument '%s' appears multiple times." % key, 5)
raise InvalidProjectConfigException
# special case in special case: relation type specifications
# are split by hyphens, nothing else is.
# (really sorry about this.)
if key == "<REL-TYPE>":
self.special_arguments[key] = atypes.split("-")
else:
self.special_arguments[key] = [atypes]
# NOTE: skip the rest of processing -- don't add in normal args
continue
# Parse "repetition" modifiers. These are regex-like:
# - Arg : mandatory argument, exactly one
# - Arg? : optional argument, at most one
# - Arg* : optional argument, any number
# - Arg+ : mandatory argument, one or more
# - Arg{N} : mandatory, exactly N
# - Arg{N-M} : mandatory, between N and M
m = re.match(r'^(\S+?)(\{\S+\}|\?|\*|\+|)$', key)
if not m:
Messager.warning("Project configuration: error parsing argument '%s'." % key, 5)
raise InvalidProjectConfigException
key, rep = m.groups()
if rep == '':
# exactly one
minimum_count = 1
maximum_count = 1
elif rep == '?':
# zero or one
minimum_count = 0
maximum_count = 1
elif rep == '*':
# any number
minimum_count = 0
maximum_count = sys.maxint
elif rep == '+':
# one or more
minimum_count = 1
maximum_count = sys.maxint
else:
# exact number or range constraint
assert '{' in rep and '}' in rep, "INTERNAL ERROR"
m = re.match(r'\{(\d+)(?:-(\d+))?\}$', rep)
if not m:
Messager.warning("Project configuration: error parsing range '%s' in argument '%s' (syntax is '{MIN-MAX}')." % (rep, key+rep), 5)
raise InvalidProjectConfigException
n1, n2 = m.groups()
n1 = int(n1)
if n2 is None:
# exact number
if n1 == 0:
Messager.warning("Project configuration: cannot have exactly 0 repetitions of argument '%s'." % (key+rep), 5)
raise InvalidProjectConfigException
minimum_count = n1
maximum_count = n1
else:
# range
n2 = int(n2)
if n1 > n2:
Messager.warning("Project configuration: invalid range %d-%d for argument '%s'." % (n1, n2, key+rep), 5)
raise InvalidProjectConfigException
minimum_count = n1
maximum_count = n2
            # format / config sanity: an argument whose label ends
            # with a digit cannot be repeated, as this would
# introduce ambiguity into parsing. (For example, the
# second "Theme" is "Theme2", and the second "Arg1" would
# be "Arg12".)
if maximum_count > 1 and key[-1].isdigit():
Messager.warning("Project configuration: error parsing: arguments ending with a digit cannot be repeated: '%s'" % (key+rep), 5)
raise InvalidProjectConfigException
if key in self.arguments:
                Messager.warning("Project configuration: error parsing: argument '%s' appears multiple times." % key, 5)
raise InvalidProjectConfigException
assert (key not in self.arg_min_count and
key not in self.arg_max_count), "INTERNAL ERROR"
self.arg_min_count[key] = minimum_count
self.arg_max_count[key] = maximum_count
self.arg_list.append(key)
for atype in atypes.split("|"):
if atype.strip() == "":
Messager.warning("Project configuration: error parsing: empty type for argument '%s'." % a, 5)
raise InvalidProjectConfigException
# Check disabled; need to support arbitrary UTF values
# for visual.conf. TODO: add this check for other configs.
# TODO: consider checking for similar for appropriate confs.
# if atype not in reserved_config_string and normalize_to_storage_form(atype) != atype:
# Messager.warning("Project configuration: '%s' is not a valid argument (should match '^[a-zA-Z0-9_-]*$')" % atype, 5)
# raise InvalidProjectConfigException
if key not in self.arguments:
self.arguments[key] = []
self.arguments[key].append(atype)
if atype not in self.keys_by_type:
self.keys_by_type[atype] = []
self.keys_by_type[atype].append(key)
def argument_minimum_count(self, arg):
"""
        Returns the minimum number of times the given argument is
required to appear for this type.
"""
return self.arg_min_count.get(arg, 0)
def argument_maximum_count(self, arg):
"""
Returns the maximum number of times the given argument is
allowed to appear for this type.
"""
return self.arg_max_count.get(arg, 0)
def mandatory_arguments(self):
"""
Returns the arguments that must appear at least once for
this type.
"""
return [a for a in self.arg_list if self.arg_min_count[a] > 0]
def multiple_allowed_arguments(self):
"""
Returns the arguments that may appear multiple times for this
type.
"""
return [a for a in self.arg_list if self.arg_max_count[a] > 1]
def storage_form(self):
"""
Returns the form of the term used for storage serverside.
"""
return self.__primary_term
def normalizations(self):
"""
Returns the normalizations applicable to this node, if any.
"""
return self.special_arguments.get('<NORM>', [])
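# Illustrative sketch (not part of the original module): how a hypothetical
# configuration line such as "Binding Theme+:Protein, Site*:Entity" maps onto
# a TypeHierarchyNode and its argument count constraints.
def _type_hierarchy_node_example():
    n = TypeHierarchyNode(["Binding"], ["Theme+:Protein", "Site*:Entity"])
    return (n.storage_form(),                   # "Binding"
            n.argument_minimum_count("Theme"),  # 1 (mandatory, "+")
            n.argument_maximum_count("Theme"),  # sys.maxint (repeatable)
            n.argument_minimum_count("Site"),   # 0 (optional, "*")
            n.mandatory_arguments())            # ["Theme"]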
def __require_tab_separator(section):
"""
Given a section name, returns True iff in that section of the
project config only tab separators should be permitted.
    This exception was initially introduced to allow slightly different
    syntax for the [labels] section than for the others.
"""
return section == "labels"
def __read_term_hierarchy(input, section=None):
root_nodes = []
last_node_at_depth = {}
macros = {}
for l in input:
# skip empties and lines starting with '#'
if l.strip() == '' or re.match(r'^\s*#', l):
continue
# interpret lines of only hyphens as separators
# for display
if re.match(r'^\s*-+\s*$', l):
# TODO: proper placeholder and placing
root_nodes.append(SEPARATOR_STR)
continue
# interpret lines of the format <STR1>=STR2 as "macro"
# definitions, defining <STR1> as a placeholder that should be
        # replaced with STR2 wherever it occurs.
m = re.match(r'^<([a-zA-Z_-]+)>=\s*(.*?)\s*$', l)
if m:
name, value = m.groups()
if name in reserved_config_name:
Messager.error("Cannot redefine <%s> in configuration, it is a reserved name." % name)
# TODO: proper exception
assert False
else:
macros["<%s>" % name] = value
continue
# macro expansion
for n in macros:
l = l.replace(n, macros[n])
# check for undefined macros
for m in re.finditer(r'(<.*?>)', l):
s = m.group(1)
assert s in reserved_config_string, "Error: undefined macro %s in configuration. (Note that macros are section-specific.)" % s
# choose strict tab-only separator or looser any-space
# separator matching depending on section
if __require_tab_separator(section):
m = re.match(r'^(\s*)([^\t]+)(?:\t(.*))?$', l)
else:
m = re.match(r'^(\s*)(\S+)(?:\s+(.*))?$', l)
assert m, "Error parsing line: '%s'" % l
indent, terms, args = m.groups()
terms = [t.strip() for t in terms.split("|") if t.strip() != ""]
if args is None or args.strip() == "":
args = []
else:
args = [a.strip() for a in args.split(",") if a.strip() != ""]
# older configs allowed space in term strings, splitting those
# from arguments by space. Trying to parse one of these in the
# new way will result in a crash from space in arguments.
# The following is a workaround for the transition.
        if len([x for x in args if re.search(r'\s', x)]) and '\t' in l:
# re-parse in the old way (dups from above)
m = re.match(r'^(\s*)([^\t]+)(?:\t(.*))?$', l)
assert m, "Error parsing line: '%s'" % l
indent, terms, args = m.groups()
terms = [t.strip() for t in terms.split("|") if t.strip() != ""]
if args is None or args.strip() == "":
args = []
else:
args = [a.strip() for a in args.split(",") if a.strip() != ""]
# issue a warning
Messager.warning("Space in term name(s) (%s) on line \"%s\" in config. This feature is deprecated and support will be removed in future versions. Please revise your configuration." % (",".join(['"%s"' % x for x in terms if " " in x]), l), 20)
# depth in the ontology corresponds to the number of
# spaces in the initial indent.
depth = len(indent)
n = TypeHierarchyNode(terms, args)
if depth == 0:
# root level, no children assignments
root_nodes.append(n)
else:
# assign as child of last node at the depth of the parent
assert depth-1 in last_node_at_depth, "Error: no parent for '%s'" % l
last_node_at_depth[depth-1].children.append(n)
last_node_at_depth[depth] = n
return root_nodes
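# Illustrative sketch (not part of the original module): the kind of input
# __read_term_hierarchy() parses. "<PRO>" is a hypothetical macro; the single
# leading space makes the second and third entries children of the first.
_EXAMPLE_TERM_HIERARCHY_LINES = """\
<PRO>=Protein
Gene_or_gene_product
 <PRO>
 RNA""".split("\n")
# __read_term_hierarchy(_EXAMPLE_TERM_HIERARCHY_LINES) would return one root
# node ("Gene_or_gene_product") with two children ("Protein" and "RNA").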
def __read_or_default(filename, default):
try:
f = open_textfile(filename, 'r')
r = f.read()
f.close()
return r
except:
# TODO: specific exception handling and reporting
return default
def __parse_kb_shortcuts(shortcutstr, default, source):
try:
shortcuts = {}
for l in shortcutstr.split("\n"):
l = l.strip()
if l == "" or l[:1] == "#":
continue
key, type = re.split(r'[ \t]+', l)
if key in shortcuts:
Messager.warning("Project configuration: keyboard shortcut for '%s' defined multiple times. Ignoring all but first ('%s')" % (key, shortcuts[key]))
else:
shortcuts[key] = type
except:
# TODO: specific exception handling
Messager.warning("Project configuration: error parsing keyboard shortcuts from %s. Configuration may be wrong." % source, 5)
shortcuts = default
return shortcuts
def __parse_access_control(acstr, source):
try:
parser = robotparser.RobotFileParser()
parser.parse(acstr.split("\n"))
except:
# TODO: specific exception handling
        Messager.warning("Project configuration: error parsing access control rules from %s. Configuration may be wrong." % source, 5)
parser = None
return parser
def get_config_path(directory):
return __read_first_in_directory_tree(directory, __annotation_config_filename)[1]
def __read_first_in_directory_tree(directory, filename):
    # config will not be available for command-line invocations;
# in these cases search whole tree
try:
from config import BASE_DIR
except:
BASE_DIR = "/"
from os.path import split, join
source, result = None, None
# check from the given directory and parents, but not above BASE_DIR
if directory is not None:
# TODO: this check may fail; consider "foo//bar/data"
while BASE_DIR in directory:
source = join(directory, filename)
result = __read_or_default(source, None)
if result is not None:
break
directory = split(directory)[0]
return (result, source)
def __parse_configs(configstr, source, expected_sections, optional_sections):
# top-level config structure is a set of term hierarchies
# separated by lines consisting of "[SECTION]" where SECTION is
# e.g. "entities", "relations", etc.
# start by splitting config file lines by section, also storing
# the label (default name or alias) used for each section.
section = "general"
section_lines = { section: [] }
section_labels = {}
for ln, l in enumerate(configstr.split("\n")):
m = re.match(r'^\s*\[(.*)\]\s*$', l)
if m:
section = m.group(1)
# map and store section name/alias (e.g. "spans" -> "entities")
section_name = SECTION_ALIAS.get(section, section)
section_labels[section_name] = section
section = section_name
if section not in expected_sections:
Messager.warning("Project configuration: unexpected section [%s] in %s. Ignoring contents." % (section, source), 5)
if section not in section_lines:
section_lines[section] = []
else:
section_lines[section].append(l)
# attempt to parse lines in each section as a term hierarchy
configs = {}
for s, sl in section_lines.items():
try:
configs[s] = __read_term_hierarchy(sl, s)
except Exception, e:
Messager.warning("Project configuration: error parsing section [%s] in %s: %s" % (s, source, str(e)), 5)
raise
# verify that expected sections are present; replace with empty if not.
for s in expected_sections:
if s not in configs:
if s not in optional_sections:
Messager.warning("Project configuration: missing section [%s] in %s. Configuration may be wrong." % (s, source), 5)
configs[s] = []
return (configs, section_labels)
def get_configs(directory, filename, defaultstr, minconf, sections, optional_sections):
if (directory, filename) not in get_configs.__cache:
configstr, source = __read_first_in_directory_tree(directory, filename)
if configstr is None:
# didn't get one; try default dir and fall back to the default
configstr = __read_or_default(filename, defaultstr)
if configstr == defaultstr:
Messager.info("Project configuration: no configuration file (%s) found, using default." % filename, 5)
source = "[default]"
else:
source = filename
# try to parse what was found, fall back to minimal config
try:
configs, section_labels = __parse_configs(configstr, source, sections, optional_sections)
except:
Messager.warning("Project configuration: Falling back to minimal default. Configuration is likely wrong.", 5)
configs = minconf
section_labels = dict(map(lambda a: (a,a), sections))
# very, very special case processing: if we have a type
# "Equiv" defined in a "relations" section that doesn't
# specify a "<REL-TYPE>", automatically fill "symmetric" and
# "transitive". This is to support older configurations that
# rely on the type "Equiv" to identify the relation as an
# equivalence.
if 'relations' in configs:
for r in configs['relations']:
if r == SEPARATOR_STR:
continue
if (r.storage_form() == "Equiv" and
"<REL-TYPE>" not in r.special_arguments):
# this was way too much noise; will only add in after
# at least most configs are revised.
# Messager.warning('Note: "Equiv" defined in config without "<REL-TYPE>"; assuming symmetric and transitive. Consider revising config to add "<REL-TYPE>:symmetric-transitive" to definition.')
r.special_arguments["<REL-TYPE>"] = ["symmetric", "transitive"]
get_configs.__cache[(directory, filename)] = (configs, section_labels)
return get_configs.__cache[(directory, filename)]
get_configs.__cache = {}
def __get_access_control(directory, filename, default_rules):
acstr, source = __read_first_in_directory_tree(directory, filename)
if acstr is None:
        acstr = default_rules # TODO: read-or-default instead of default
if acstr == default_rules:
source = "[default rules]"
else:
source = filename
ac_oracle = __parse_access_control(acstr, source)
return ac_oracle
def __get_kb_shortcuts(directory, filename, default_shortcuts, min_shortcuts):
shortcutstr, source = __read_first_in_directory_tree(directory, filename)
if shortcutstr is None:
shortcutstr = __read_or_default(filename, default_shortcuts)
if shortcutstr == default_shortcuts:
source = "[default kb_shortcuts]"
else:
source = filename
kb_shortcuts = __parse_kb_shortcuts(shortcutstr, min_shortcuts, source)
return kb_shortcuts
# final fallback for configuration; a minimal known-good config
__minimal_configuration = {
ENTITY_SECTION : [TypeHierarchyNode(["Protein"])],
RELATION_SECTION : [TypeHierarchyNode(["Equiv"], ["Arg1:Protein", "Arg2:Protein", "<REL-TYPE>:symmetric-transitive"])],
EVENT_SECTION : [TypeHierarchyNode(["Event"], ["Theme:Protein"])],
ATTRIBUTE_SECTION : [TypeHierarchyNode(["Negation"], ["Arg:<EVENT>"])],
}
def get_annotation_configs(directory):
return get_configs(directory,
__annotation_config_filename,
__default_configuration,
__minimal_configuration,
__expected_annotation_sections,
__optional_annotation_sections)
# final fallback for visual configuration; minimal known-good config
__minimal_visual = {
LABEL_SECTION : [TypeHierarchyNode(["Protein", "Pro", "P"]),
TypeHierarchyNode(["Equiv", "Eq"]),
TypeHierarchyNode(["Event", "Ev"])],
DRAWING_SECTION : [TypeHierarchyNode([VISUAL_SPAN_DEFAULT], ["fgColor:black", "bgColor:white"]),
TypeHierarchyNode([VISUAL_ARC_DEFAULT], ["color:black"]),
TypeHierarchyNode([VISUAL_ATTR_DEFAULT], ["glyph:*"])],
}
def get_visual_configs(directory):
return get_configs(directory,
__visual_config_filename,
__default_visual,
__minimal_visual,
__expected_visual_sections,
__optional_visual_sections)
# final fallback for tools configuration; minimal known-good config
__minimal_tools = {
OPTIONS_SECTION : [],
SEARCH_SECTION : [TypeHierarchyNode(["google"], ["<URL>:http://www.google.com/search?q=%s"])],
ANNOTATORS_SECTION : [],
DISAMBIGUATORS_SECTION : [],
NORMALIZATION_SECTION : [],
}
def get_tools_configs(directory):
return get_configs(directory,
__tools_config_filename,
__default_tools,
__minimal_tools,
__expected_tools_sections,
__optional_tools_sections)
def get_entity_type_hierarchy(directory):
return get_annotation_configs(directory)[0][ENTITY_SECTION]
def get_relation_type_hierarchy(directory):
return get_annotation_configs(directory)[0][RELATION_SECTION]
def get_event_type_hierarchy(directory):
return get_annotation_configs(directory)[0][EVENT_SECTION]
def get_attribute_type_hierarchy(directory):
return get_annotation_configs(directory)[0][ATTRIBUTE_SECTION]
def get_annotation_config_section_labels(directory):
return get_annotation_configs(directory)[1]
# TODO: too much caching?
def get_labels(directory):
cache = get_labels.__cache
if directory not in cache:
l = {}
for t in get_visual_configs(directory)[0][LABEL_SECTION]:
if t.storage_form() in l:
Messager.warning("In configuration, labels for '%s' defined more than once. Only using the last set." % t.storage_form(), -1)
            # first is the storage form, the rest are labels.
l[t.storage_form()] = t.terms[1:]
cache[directory] = l
return cache[directory]
get_labels.__cache = {}
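# Illustrative note (not part of the original module): with the default
# visual configuration above, get_labels(directory) would map
# 'Protein' -> ['Protein', 'Pro', 'P'], i.e. storage form to its display
# label alternatives, longest first.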
# TODO: too much caching?
def get_drawing_types(directory):
cache = get_drawing_types.__cache
if directory not in cache:
l = set()
for n in get_drawing_config(directory):
l.add(n.storage_form())
cache[directory] = list(l)
return cache[directory]
get_drawing_types.__cache = {}
def get_option_config(directory):
return get_tools_configs(directory)[0][OPTIONS_SECTION]
def get_drawing_config(directory):
return get_visual_configs(directory)[0][DRAWING_SECTION]
def get_visual_config_section_labels(directory):
return get_visual_configs(directory)[1]
def get_search_config(directory):
return get_tools_configs(directory)[0][SEARCH_SECTION]
def get_annotator_config(directory):
return get_tools_configs(directory)[0][ANNOTATORS_SECTION]
def get_disambiguator_config(directory):
return get_tools_configs(directory)[0][DISAMBIGUATORS_SECTION]
def get_normalization_config(directory):
return get_tools_configs(directory)[0][NORMALIZATION_SECTION]
def get_tools_config_section_labels(directory):
return get_tools_configs(directory)[1]
def get_access_control(directory):
cache = get_access_control.__cache
if directory not in cache:
a = __get_access_control(directory,
__access_control_filename,
__default_access_control)
cache[directory] = a
return cache[directory]
get_access_control.__cache = {}
def get_kb_shortcuts(directory):
cache = get_kb_shortcuts.__cache
if directory not in cache:
a = __get_kb_shortcuts(directory,
__kb_shortcut_filename,
__default_kb_shortcuts,
{ "P" : "Positive_regulation" })
cache[directory] = a
return cache[directory]
get_kb_shortcuts.__cache = {}
def __collect_type_list(node, collected):
if node == SEPARATOR_STR:
return collected
collected.append(node)
for c in node.children:
__collect_type_list(c, collected)
return collected
def __type_hierarchy_to_list(hierarchy):
root_nodes = hierarchy
types = []
for n in root_nodes:
__collect_type_list(n, types)
return types
# TODO: it's not clear it makes sense for all of these methods to have
# their own caches; this seems a bit like a case of premature
# optimization to me. Consider simplifying.
def get_entity_type_list(directory):
cache = get_entity_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_entity_type_hierarchy(directory))
return cache[directory]
get_entity_type_list.__cache = {}
def get_event_type_list(directory):
cache = get_event_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_event_type_hierarchy(directory))
return cache[directory]
get_event_type_list.__cache = {}
def get_relation_type_list(directory):
cache = get_relation_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_relation_type_hierarchy(directory))
return cache[directory]
get_relation_type_list.__cache = {}
def get_attribute_type_list(directory):
cache = get_attribute_type_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_attribute_type_hierarchy(directory))
return cache[directory]
get_attribute_type_list.__cache = {}
def get_search_config_list(directory):
cache = get_search_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_search_config(directory))
return cache[directory]
get_search_config_list.__cache = {}
def get_annotator_config_list(directory):
cache = get_annotator_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_annotator_config(directory))
return cache[directory]
get_annotator_config_list.__cache = {}
def get_disambiguator_config_list(directory):
cache = get_disambiguator_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_disambiguator_config(directory))
return cache[directory]
get_disambiguator_config_list.__cache = {}
def get_normalization_config_list(directory):
cache = get_normalization_config_list.__cache
if directory not in cache:
cache[directory] = __type_hierarchy_to_list(get_normalization_config(directory))
return cache[directory]
get_normalization_config_list.__cache = {}
def get_node_by_storage_form(directory, term):
cache = get_node_by_storage_form.__cache
if directory not in cache:
d = {}
for e in get_entity_type_list(directory) + get_event_type_list(directory):
t = e.storage_form()
if t in d:
Messager.warning("Project configuration: term %s appears multiple times, only using last. Configuration may be wrong." % t, 5)
d[t] = e
cache[directory] = d
return cache[directory].get(term, None)
get_node_by_storage_form.__cache = {}
def get_option_config_by_storage_form(directory, term):
cache = get_option_config_by_storage_form.__cache
if directory not in cache:
d = {}
for n in get_option_config(directory):
t = n.storage_form()
if t in d:
Messager.warning("Project configuration: %s appears multiple times, only using last. Configuration may be wrong." % t, 5)
d[t] = {}
for a in n.arguments:
if len(n.arguments[a]) != 1:
Messager.warning("Project configuration: %s key %s has multiple values, only using first. Configuration may be wrong." % (t, a), 5)
d[t][a] = n.arguments[a][0]
cache[directory] = d
return cache[directory].get(term, None)
get_option_config_by_storage_form.__cache = {}
# access for settings for specific options in tools.conf
# TODO: avoid fixed string values here, define vars earlier
def options_get_validation(directory):
v = get_option_config_by_storage_form(directory, 'Validation')
return 'none' if v is None else v.get('validate', 'none')
def options_get_tokenization(directory):
v = get_option_config_by_storage_form(directory, 'Tokens')
return 'whitespace' if v is None else v.get('tokenizer', 'whitespace')
def options_get_ssplitter(directory):
v = get_option_config_by_storage_form(directory, 'Sentences')
return 'regex' if v is None else v.get('splitter', 'regex')
def options_get_annlogfile(directory):
v = get_option_config_by_storage_form(directory, 'Annotation-log')
return '<NONE>' if v is None else v.get('logfile', '<NONE>')
def get_drawing_config_by_storage_form(directory, term):
cache = get_drawing_config_by_storage_form.__cache
if directory not in cache:
d = {}
for n in get_drawing_config(directory):
t = n.storage_form()
if t in d:
Messager.warning("Project configuration: term %s appears multiple times, only using last. Configuration may be wrong." % t, 5)
d[t] = {}
for a in n.arguments:
# attribute drawing can be specified with multiple
# values (multi-valued attributes), other parts of
# drawing config should have single values only.
if len(n.arguments[a]) != 1:
if a in ATTR_DRAWING_ATTRIBUTES:
# use multi-valued directly
d[t][a] = n.arguments[a]
else:
# warn and pass
Messager.warning("Project configuration: expected single value for %s argument %s, got '%s'. Configuration may be wrong." % (t, a, "|".join(n.arguments[a])))
else:
d[t][a] = n.arguments[a][0]
# TODO: hack to get around inability to have commas in values;
# fix original issue instead
for t in d:
for k in d[t]:
# sorry about this
if not isinstance(d[t][k], list):
d[t][k] = d[t][k].replace("-", ",")
else:
for i in range(len(d[t][k])):
d[t][k][i] = d[t][k][i].replace("-", ",")
default_keys = [VISUAL_SPAN_DEFAULT,
VISUAL_ARC_DEFAULT,
VISUAL_ATTR_DEFAULT]
for default_dict in [d.get(dk, {}) for dk in default_keys]:
for k in default_dict:
for t in d:
d[t][k] = d[t].get(k, default_dict[k])
# Kind of a special case: recognize <NONE> as "deleting" an
# attribute (prevents default propagation) and <EMPTY> as
# specifying that a value should be the empty string
# (can't be written as such directly).
for t in d:
todelete = [k for k in d[t] if d[t][k] == '<NONE>']
for k in todelete:
del d[t][k]
for k in d[t]:
if d[t][k] == '<EMPTY>':
d[t][k] = ''
cache[directory] = d
return cache[directory].get(term, None)
get_drawing_config_by_storage_form.__cache = {}
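# Illustrative note (not part of the original module): given a hypothetical
# visual.conf drawing line "Protein bgColor:#7fa2ff, dashArray:3-3",
# get_drawing_config_by_storage_form(directory, 'Protein') would return
# {'bgColor': '#7fa2ff', 'dashArray': '3,3'} plus any *_DEFAULT values not
# overridden (the '-' to ',' rewrite is the comma workaround noted above).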
def __directory_relations_by_arg_num(directory, num, atype, include_special=False):
assert num >= 0 and num < 2, "INTERNAL ERROR"
rels = []
entity_types = set([t.storage_form()
for t in get_entity_type_list(directory)])
event_types = set([t.storage_form()
for t in get_event_type_list(directory)])
for r in get_relation_type_list(directory):
# "Special" nesting relations ignored unless specifically
# requested
if r.storage_form() in SPECIAL_RELATION_TYPES and not include_special:
continue
if len(r.arg_list) != 2:
# Don't complain about argument constraints for unused relations
if not r.unused:
Messager.warning("Relation type %s has %d arguments in configuration (%s; expected 2). Please fix configuration." % (r.storage_form(), len(r.arg_list), ",".join(r.arg_list)))
else:
types = r.arguments[r.arg_list[num]]
for type_ in types:
# TODO: there has to be a better way
if (type_ == atype or
type_ == "<ANY>" or
atype == "<ANY>" or
(type_ in entity_types and atype == "<ENTITY>") or
(type_ in event_types and atype == "<EVENT>") or
(atype in entity_types and type_ == "<ENTITY>") or
(atype in event_types and type_ == "<EVENT>")):
rels.append(r)
# TODO: why not break here?
return rels
def get_relations_by_arg1(directory, atype, include_special=False):
cache = get_relations_by_arg1.__cache
cache[directory] = cache.get(directory, {})
if (atype, include_special) not in cache[directory]:
cache[directory][(atype, include_special)] = __directory_relations_by_arg_num(directory, 0, atype, include_special)
return cache[directory][(atype, include_special)]
get_relations_by_arg1.__cache = {}
def get_relations_by_arg2(directory, atype, include_special=False):
cache = get_relations_by_arg2.__cache
cache[directory] = cache.get(directory, {})
if (atype, include_special) not in cache[directory]:
cache[directory][(atype, include_special)] = __directory_relations_by_arg_num(directory, 1, atype, include_special)
return cache[directory][(atype, include_special)]
get_relations_by_arg2.__cache = {}
def get_relations_by_storage_form(directory, rtype, include_special=False):
cache = get_relations_by_storage_form.__cache
cache[directory] = cache.get(directory, {})
if include_special not in cache[directory]:
cache[directory][include_special] = {}
for r in get_relation_type_list(directory):
if (r.storage_form() in SPECIAL_RELATION_TYPES and
not include_special):
continue
if r.unused:
continue
if r.storage_form() not in cache[directory][include_special]:
cache[directory][include_special][r.storage_form()] = []
cache[directory][include_special][r.storage_form()].append(r)
return cache[directory][include_special].get(rtype, [])
get_relations_by_storage_form.__cache = {}
def get_labels_by_storage_form(directory, term):
cache = get_labels_by_storage_form.__cache
if directory not in cache:
cache[directory] = {}
for l, labels in get_labels(directory).items():
# recognize <EMPTY> as specifying that a label should
# be the empty string
labels = [lab if lab != '<EMPTY>' else ' ' for lab in labels]
cache[directory][l] = labels
return cache[directory].get(term, None)
get_labels_by_storage_form.__cache = {}
# fallback for missing or partial config: these are highly likely to
# be entity (as opposed to an event or relation) types.
# TODO: remove this workaround once the configs stabilize.
very_likely_physical_entity_types = [
'Protein',
'Entity',
'Organism',
'Chemical',
'Two-component-system',
'Regulon-operon',
# for more PTM annotation
'Protein_family_or_group',
'DNA_domain_or_region',
'Protein_domain_or_region',
'Amino_acid_monomer',
'Carbohydrate',
# for AZ corpus
'Cell_type',
'Drug_or_compound',
'Gene_or_gene_product',
'Tissue',
#'Not_sure',
#'Other',
'Other_pharmaceutical_agent',
]
# helper; doesn't really belong here
# TODO: shouldn't we have a utils.py or something for stuff like this?
def unique_preserve_order(iterable):
seen = set()
uniqued = []
for i in iterable:
if i not in seen:
seen.add(i)
uniqued.append(i)
return uniqued
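# Illustrative note (not part of the original module):
# unique_preserve_order([3, 1, 3, 2, 1]) == [3, 1, 2].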
class ProjectConfiguration(object):
def __init__(self, directory):
        # debugging (note: the latter test is for Windows paths)
if directory[:1] != "/" and not re.search(r'^[a-zA-Z]:\\', directory):
Messager.debug("Project config received relative directory ('%s'), configuration may not be found." % directory, duration=-1)
self.directory = directory
def mandatory_arguments(self, atype):
"""
Returns the mandatory argument types that must be present for
an annotation of the given type.
"""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning("Project configuration: unknown event type %s. Configuration may be wrong." % atype)
return []
return node.mandatory_arguments()
def multiple_allowed_arguments(self, atype):
"""
Returns the argument types that are allowed to be filled more
than once for an annotation of the given type.
"""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning("Project configuration: unknown event type %s. Configuration may be wrong." % atype)
return []
return node.multiple_allowed_arguments()
def argument_maximum_count(self, atype, arg):
"""
Returns the maximum number of times that the given argument is
allowed to be filled for an annotation of the given type.
"""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning("Project configuration: unknown event type %s. Configuration may be wrong." % atype)
return 0
return node.argument_maximum_count(arg)
def argument_minimum_count(self, atype, arg):
"""
Returns the minimum number of times that the given argument is
allowed to be filled for an annotation of the given type.
"""
node = get_node_by_storage_form(self.directory, atype)
if node is None:
Messager.warning("Project configuration: unknown event type %s. Configuration may be wrong." % atype)
return 0
return node.argument_minimum_count(arg)
def arc_types_from(self, from_ann):
return self.arc_types_from_to(from_ann)
def relation_types_from(self, from_ann, include_special=False):
"""
Returns the possible relation types that can have an
annotation of the given type as their arg1.
"""
return [r.storage_form() for r in get_relations_by_arg1(self.directory, from_ann, include_special)]
def relation_types_to(self, to_ann, include_special=False):
"""
Returns the possible relation types that can have an
annotation of the given type as their arg2.
"""
return [r.storage_form() for r in get_relations_by_arg2(self.directory, to_ann, include_special)]
def relation_types_from_to(self, from_ann, to_ann, include_special=False):
"""
Returns the possible relation types that can have the
given arg1 and arg2.
"""
types = []
t1r = get_relations_by_arg1(self.directory, from_ann, include_special)
t2r = get_relations_by_arg2(self.directory, to_ann, include_special)
for r in t1r:
if r in t2r:
types.append(r.storage_form())
return types
def overlap_types(self, inner, outer):
"""
Returns the set of annotation overlap types that have been
configured for the given pair of annotations.
"""
# TODO: this is O(NM) for relation counts N and M and goes
# past much of the implemented caching. Might become a
# bottleneck for annotations with large type systems.
t1r = get_relations_by_arg1(self.directory, inner, True)
t2r = get_relations_by_arg2(self.directory, outer, True)
types = []
for r in (s for s in t1r if s.storage_form() in SPECIAL_RELATION_TYPES):
if r in t2r:
types.append(r)
# new-style overlap configuration ("<OVERLAP>") takes precedence
# over old-style configuration ("ENTITY-NESTING").
ovl_types = set()
ovl = [r for r in types if r.storage_form() == TEXTBOUND_OVERLAP_TYPE]
nst = [r for r in types if r.storage_form() == ENTITY_NESTING_TYPE]
if ovl:
if nst:
Messager.warning('Warning: both '+TEXTBOUND_OVERLAP_TYPE+
' and '+ENTITY_NESTING_TYPE+' defined for '+
'('+inner+','+outer+') in config. '+
'Ignoring latter.')
for r in ovl:
if OVERLAP_TYPE_ARG not in r.special_arguments:
Messager.warning('Warning: missing '+OVERLAP_TYPE_ARG+
' for '+TEXTBOUND_OVERLAP_TYPE+
', ignoring specification.')
continue
for val in r.special_arguments[OVERLAP_TYPE_ARG]:
ovl_types |= set(val.split('|'))
elif nst:
# translate into new-style configuration
ovl_types = set(['contain'])
else:
ovl_types = set()
undefined_types = [t for t in ovl_types if
t not in ('contain', 'equal', 'cross', '<ANY>')]
if undefined_types:
Messager.warning('Undefined '+OVERLAP_TYPE_ARG+' value(s) '+
str(undefined_types)+' for '+
'('+inner+','+outer+') in config. ')
return ovl_types
def span_can_contain(self, inner, outer):
"""
Returns True if the configuration allows the span of an
annotation of type inner to (properly) contain an annotation
of type outer, False otherwise.
"""
ovl_types = self.overlap_types(inner, outer)
if 'contain' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(outer, inner)
if '<ANY>' in ovl_types:
return True
return False
def spans_can_be_equal(self, t1, t2):
"""
Returns True if the configuration allows the spans of
annotations of type t1 and t2 to be equal, False otherwise.
"""
ovl_types = self.overlap_types(t1, t2)
if 'equal' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(t2, t1)
if 'equal' in ovl_types or '<ANY>' in ovl_types:
return True
return False
def spans_can_cross(self, t1, t2):
"""
Returns True if the configuration allows the spans of
annotations of type t1 and t2 to cross, False otherwise.
"""
ovl_types = self.overlap_types(t1, t2)
if 'cross' in ovl_types or '<ANY>' in ovl_types:
return True
ovl_types = self.overlap_types(t2, t1)
if 'cross' in ovl_types or '<ANY>' in ovl_types:
return True
return False
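    # Illustrative note (not part of the original configuration): a
    # hypothetical relations entry
    #     <OVERLAP> Arg1:Protein, Arg2:Protein_domain_or_region, <OVL-TYPE>:contain|equal
    # would make span_can_contain('Protein', 'Protein_domain_or_region') and
    # spans_can_be_equal('Protein', 'Protein_domain_or_region') return True,
    # while spans_can_cross() for the pair would remain False.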
def all_connections(self, include_special=False):
"""
Returns a dict of dicts of lists, outer dict keyed by
entity/event type, inner dicts by role/relation type, and
lists containing entity/event types, representing all possible
connections between annotations. This function is provided to
optimize access to the entire annotation configuration for
passing it to the client and should never be used to check for
individual connections. The caller must not change the
contents of the returned collection.
"""
# TODO: are these uniques really necessary?
entity_types = unique_preserve_order(self.get_entity_types())
event_types = unique_preserve_order(self.get_event_types())
all_types = unique_preserve_order(entity_types + event_types)
connections = {}
# TODO: it might be possible to avoid copies like
# entity_types[:] and all_types[:] here. Consider the
# possibility.
for t1 in all_types:
assert t1 not in connections, "INTERNAL ERROR"
connections[t1] = {}
processed_as_relation = {}
# relations
rels = get_relations_by_arg1(self.directory, t1, include_special)
for r in rels:
a = r.storage_form()
conns = connections[t1].get(a, [])
# magic number "1" is for 2nd argument
args = r.arguments[r.arg_list[1]]
if "<ANY>" in args:
connections[t1][a] = all_types[:]
else:
for t2 in args:
if t2 == "<ENTITY>":
conns.extend(entity_types)
elif t2 == "<EVENT>":
conns.extend(event_types)
else:
conns.append(t2)
connections[t1][a] = unique_preserve_order(conns)
processed_as_relation[a] = True
# event arguments
n1 = get_node_by_storage_form(self.directory, t1)
for a, args in n1.arguments.items():
if a in processed_as_relation:
Messager.warning("Project configuration: %s appears both as role and relation. Configuration may be wrong." % a)
# won't try to resolve
continue
assert a not in connections[t1], "INTERNAL ERROR"
# TODO: dedup w/above
if "<ANY>" in args:
connections[t1][a] = all_types[:]
else:
conns = []
for t2 in args:
if t2 == "<EVENT>":
conns.extend(event_types)
elif t2 == "<ENTITY>":
conns.extend(entity_types)
else:
conns.append(t2)
connections[t1][a] = unique_preserve_order(conns)
return connections
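    # Illustrative note (not part of the original module): for a minimal
    # configuration with the entity "Protein" and the event "Gene_expression"
    # taking "Theme:Protein", all_connections() would resemble
    #     {'Protein': {}, 'Gene_expression': {'Theme': ['Protein']}}
    # with any configured relation roles added to the inner dicts.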
def arc_types_from_to(self, from_ann, to_ann="<ANY>", include_special=False):
"""
Returns the possible arc types that can connect an annotation
of type from_ann to an annotation of type to_ann.
If to_ann has the value \"<ANY>\", returns all possible arc types.
"""
from_node = get_node_by_storage_form(self.directory, from_ann)
if from_node is None:
Messager.warning("Project configuration: unknown textbound/event type %s. Configuration may be wrong." % from_ann)
return []
if to_ann == "<ANY>":
relations_from = get_relations_by_arg1(self.directory, from_ann, include_special)
# TODO: consider using from_node.arg_list instead of .arguments for order
return unique_preserve_order([role for role in from_node.arguments] + [r.storage_form() for r in relations_from])
# specific hits
types = from_node.keys_by_type.get(to_ann, [])
if "<ANY>" in from_node.keys_by_type:
types += from_node.keys_by_type["<ANY>"]
# generic arguments
if self.is_event_type(to_ann) and '<EVENT>' in from_node.keys_by_type:
types += from_node.keys_by_type['<EVENT>']
if self.is_physical_entity_type(to_ann) and '<ENTITY>' in from_node.keys_by_type:
types += from_node.keys_by_type['<ENTITY>']
# relations
types.extend(self.relation_types_from_to(from_ann, to_ann))
return unique_preserve_order(types)
def attributes_for(self, ann_type):
"""
        Returns a list of the possible attribute types for an
annotation of the given type.
"""
attrs = []
for attr in get_attribute_type_list(self.directory):
if attr == SEPARATOR_STR:
continue
if 'Arg' not in attr.arguments:
Messager.warning("Project configuration: config error: attribute '%s' lacks 'Arg:' specification." % attr.storage_form())
continue
types = attr.arguments['Arg']
if ((ann_type in types) or ('<ANY>' in types) or
(self.is_event_type(ann_type) and '<EVENT>' in types) or
(self.is_physical_entity_type(ann_type) and '<ENTITY>' in types)
or
(self.is_relation_type(ann_type) and '<RELATION>' in types)):
attrs.append(attr.storage_form())
return attrs
def get_labels(self):
return get_labels(self.directory)
def get_kb_shortcuts(self):
return get_kb_shortcuts(self.directory)
def get_access_control(self):
return get_access_control(self.directory)
def get_attribute_types(self):
return [t.storage_form() for t in get_attribute_type_list(self.directory)]
def get_event_types(self):
return [t.storage_form() for t in get_event_type_list(self.directory)]
def get_relation_types(self):
return [t.storage_form() for t in get_relation_type_list(self.directory)]
def get_equiv_types(self):
# equivalence relations are those relations that are symmetric
# and transitive, i.e. that have "symmetric" and "transitive"
# in their "<REL-TYPE>" special argument values.
return [t.storage_form() for t in get_relation_type_list(self.directory)
if "<REL-TYPE>" in t.special_arguments and
"symmetric" in t.special_arguments["<REL-TYPE>"] and
"transitive" in t.special_arguments["<REL-TYPE>"]]
def get_relations_by_type(self, _type):
return get_relations_by_storage_form(self.directory, _type)
def get_labels_by_type(self, _type):
return get_labels_by_storage_form(self.directory, _type)
def get_drawing_types(self):
return get_drawing_types(self.directory)
def get_drawing_config_by_type(self, _type):
return get_drawing_config_by_storage_form(self.directory, _type)
def get_search_config(self):
search_config = []
for r in get_search_config_list(self.directory):
if '<URL>' not in r.special_arguments:
Messager.warning('Project configuration: config error: missing <URL> specification for %s search.' % r.storage_form())
else:
search_config.append((r.storage_form(), r.special_arguments['<URL>'][0]))
return search_config
def _get_tool_config(self, tool_list):
tool_config = []
for r in tool_list:
if '<URL>' not in r.special_arguments:
Messager.warning('Project configuration: config error: missing <URL> specification for %s.' % r.storage_form())
continue
if 'tool' not in r.arguments:
Messager.warning('Project configuration: config error: missing tool name ("tool") for %s.' % r.storage_form())
continue
if 'model' not in r.arguments:
Messager.warning('Project configuration: config error: missing model name ("model") for %s.' % r.storage_form())
continue
tool_config.append((r.storage_form(),
r.arguments['tool'][0],
r.arguments['model'][0],
r.special_arguments['<URL>'][0]))
return tool_config
def get_disambiguator_config(self):
tool_list = get_disambiguator_config_list(self.directory)
return self._get_tool_config(tool_list)
def get_annotator_config(self):
# TODO: "annotator" is a very confusing term for a web service
# that does automatic annotation in the context of a tool
# where most annotators are expected to be human. Rethink.
tool_list = get_annotator_config_list(self.directory)
return self._get_tool_config(tool_list)
def get_normalization_config(self):
norm_list = get_normalization_config_list(self.directory)
norm_config = []
for n in norm_list:
if 'DB' not in n.arguments:
# optional, server looks in default location if None
n.arguments['DB'] = [None]
if '<URL>' not in n.special_arguments:
Messager.warning('Project configuration: config error: missing <URL> specification for %s.' % n.storage_form())
continue
if '<URLBASE>' not in n.special_arguments:
# now optional, client skips link generation if None
n.special_arguments['<URLBASE>'] = [None]
norm_config.append((n.storage_form(),
n.special_arguments['<URL>'][0],
n.special_arguments['<URLBASE>'][0],
n.arguments['DB'][0]))
return norm_config
def get_entity_types(self):
return [t.storage_form() for t in get_entity_type_list(self.directory)]
def get_entity_type_hierarchy(self):
return get_entity_type_hierarchy(self.directory)
def get_relation_type_hierarchy(self):
return get_relation_type_hierarchy(self.directory)
def get_event_type_hierarchy(self):
return get_event_type_hierarchy(self.directory)
def get_attribute_type_hierarchy(self):
return get_attribute_type_hierarchy(self.directory)
def _get_filtered_attribute_type_hierarchy(self, types):
from copy import deepcopy
        # TODO: This doesn't properly implement recursive traversal
# and filtering, instead only checking the topmost nodes.
filtered = []
for t in self.get_attribute_type_hierarchy():
if t.storage_form() in types:
filtered.append(deepcopy(t))
return filtered
def attributes_for_types(self, types):
"""
Returns list containing the attribute types that are
applicable to at least one of the given annotation types.
"""
# list to preserve order, dict for lookup
attribute_list = []
seen = {}
for t in types:
for a in self.attributes_for(t):
if a not in seen:
attribute_list.append(a)
seen[a] = True
return attribute_list
def get_entity_attribute_type_hierarchy(self):
"""
Returns the attribute type hierarchy filtered to include
only attributes that apply to at least one entity.
"""
attr_types = self.attributes_for_types(self.get_entity_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def get_relation_attribute_type_hierarchy(self):
"""
Returns the attribute type hierarchy filtered to include
only attributes that apply to at least one relation.
"""
attr_types = self.attributes_for_types(self.get_relation_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def get_event_attribute_type_hierarchy(self):
"""
Returns the attribute type hierarchy filtered to include
only attributes that apply to at least one event.
"""
attr_types = self.attributes_for_types(self.get_event_types())
return self._get_filtered_attribute_type_hierarchy(attr_types)
def preferred_display_form(self, t):
"""
Given a storage form label, returns the preferred display form
as defined by the label configuration (labels.conf)
"""
labels = get_labels_by_storage_form(self.directory, t)
if labels is None or len(labels) < 1:
return t
else:
return labels[0]
def is_physical_entity_type(self, t):
if t in self.get_entity_types() or t in self.get_event_types():
return t in self.get_entity_types()
# TODO: remove this temporary hack
if t in very_likely_physical_entity_types:
return True
return t in self.get_entity_types()
def is_event_type(self, t):
return t in self.get_event_types()
def is_relation_type(self, t):
return t in self.get_relation_types()
def is_equiv_type(self, t):
return t in self.get_equiv_types()
def is_configured_type(self, t):
return (t in self.get_entity_types() or
t in self.get_event_types() or
t in self.get_relation_types())
def type_category(self, t):
"""
Returns the category of the given type t.
The categories can be compared for equivalence but offer
no other interface.
"""
if self.is_physical_entity_type(t):
return ENTITY_CATEGORY
elif self.is_event_type(t):
return EVENT_CATEGORY
elif self.is_relation_type(t):
return RELATION_CATEGORY
else:
# TODO: others
return UNKNOWN_CATEGORY
|
py | 7dfbbe8b2b53d96fa4b2f2e9a73599067c762855 | from celery.task import Task
class IndexSubscriptionsForSearch(Task):
def run(self, user_id):
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_for_search()
class IndexSubscriptionsChunkForSearch(Task):
ignore_result = False
def run(self, feed_ids, user_id):
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_chunk_for_search(feed_ids)
class IndexFeedsForSearch(Task):
def run(self, feed_ids, user_id):
from apps.search.models import MUserSearch
MUserSearch.index_feeds_for_search(feed_ids, user_id) |
py | 7dfbbea2160f15c0b4c7c882ed0f678e9866d6cb | from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Type
from nuplan.common.actor_state.ego_state import EgoState
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.observation.observation_type import Observation
from nuplan.planning.training.preprocessing.features.abstract_model_feature import AbstractModelFeature
logger = logging.getLogger(__name__)
@dataclass
class FeatureBuilderMetaData:
map_api: AbstractMap # Abstract map api for accessing the maps.
    mission_goal: StateSE2  # Goal far into the future (generally more than 100m beyond the scenario length).
expert_goal_state: StateSE2 # Expert state at the end of the scenario
class AbstractFeatureBuilder(ABC):
"""
Abstract class that creates model input features from database samples.
"""
@classmethod
@abstractmethod
def get_feature_type(cls) -> Type[AbstractModelFeature]:
"""
:return A type of a feature that will be computed
"""
pass
@classmethod
@abstractmethod
def get_feature_unique_name(cls) -> str:
"""
:return A unique string identifier of generated feature
"""
pass
@abstractmethod
def get_features_from_simulation(self,
ego_states: List[EgoState],
observations: List[Observation],
meta_data: FeatureBuilderMetaData) -> AbstractModelFeature:
"""
Constructs model input features from simulation history.
:param ego_states: Past ego state trajectory including the state at the current time step [t_-N, ..., t_-1, t_0]
:param observations: Past observations including the observation at the current time step [t_-N, ..., t_-1, t_0]
        :param meta_data: Additional data required for building the feature
:return: Constructed features
"""
pass
@abstractmethod
def get_features_from_scenario(self, scenario: AbstractScenario) -> AbstractModelFeature:
"""
        Constructs model input features from database samples.
:param scenario: Generic scenario
:return: Constructed features
"""
pass
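# A minimal sketch (not part of nuPlan) of how a concrete builder could look;
# "RasterFeature" and its from_* constructors are hypothetical names used only
# to show how the abstract methods fit together:
#
#     class RasterFeatureBuilder(AbstractFeatureBuilder):
#         @classmethod
#         def get_feature_type(cls) -> Type[AbstractModelFeature]:
#             return RasterFeature
#
#         @classmethod
#         def get_feature_unique_name(cls) -> str:
#             return "raster"
#
#         def get_features_from_simulation(self, ego_states, observations, meta_data):
#             return RasterFeature.from_simulation(ego_states, observations, meta_data)
#
#         def get_features_from_scenario(self, scenario):
#             return RasterFeature.from_scenario(scenario)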
|
py | 7dfbbeb7309d9ef7994d9f72703e1ceaa71ddfe2 | #
# Runtime for the ARMv7 MPU
#
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
import itertools as it
import math
from .. import argstuff
from .. import runtimes
from ..box import Fn, Section, Region, Import, Export
from ..glue.error_glue import ErrorGlue
from ..glue.write_glue import WriteGlue
from ..glue.abort_glue import AbortGlue
from ..glue.heap_glue import HeapGlue
from ..outputs import OutputBlob
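# Illustrative sketch (not part of the runtime): how the MPU_RASR values in
# the C templates below are encoded for an ARMv7-M region of 2**log2size bytes
# (ENABLE in bit 0, SIZE in bits [5:1] with size = 2**(SIZE+1), XN in bit 28).
def _example_rasr(log2size, executable=False):
    assert log2size >= 5, "ARMv7-M MPU regions are at least 32 bytes"
    rasr = 0x1 | ((log2size - 1) << 1)
    if not executable:
        rasr |= 1 << 28  # XN: disallow execution, as done for the call region
    return rasr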
MPU_STATE = """
uint32_t __box_active = 0;
extern uint32_t __box_callregion;
extern void __box_return(void);
"""
MPU_IMPL = """
#define SHCSR ((volatile uint32_t*)0xe000ed24)
#define MPU_TYPE ((volatile uint32_t*)0xe000ed90)
#define MPU_CTRL ((volatile uint32_t*)0xe000ed94)
#define MPU_RBAR ((volatile uint32_t*)0xe000ed9c)
#define MPU_RASR ((volatile uint32_t*)0xe000eda0)
struct __box_mpuregions {
uint32_t control;
uint32_t count;
uint32_t regions[][2];
};
static int32_t __box_mpu_init(void) {
// make sure MPU is initialized
if (!(*MPU_CTRL & 0x1)) {
// do we have an MPU?
assert(*MPU_TYPE >= %(mpuregions)d);
// enable MemManage exceptions
*SHCSR = *SHCSR | 0x00070000;
// setup call region
*MPU_RBAR = (uint32_t)&__box_callregion | 0x10;
// disallow execution
*MPU_RASR = 0x10000001 | ((%(calllog2)d-1) << 1);
// enable the MPU
*MPU_CTRL = 5;
}
return 0;
}
static void __box_mpu_switch(const struct __box_mpuregions *regions) {
// update MPU regions
*MPU_CTRL = 0;
uint32_t count = regions->count;
for (int i = 0; i < %(mpuregions)d; i++) {
if (i < count) {
*MPU_RBAR = regions->regions[i][0] | 0x10 | (i+1);
*MPU_RASR = regions->regions[i][1];
} else {
*MPU_RBAR = 0x10 | (i+1);
*MPU_RASR = 0;
}
}
*MPU_CTRL = 5;
// update CONTROL state, note that return-from-exception acts
// as an instruction barrier
uint32_t control;
__asm__ volatile ("mrs %%0, control" : "=r"(control));
control = (~1 & control) | (regions->control);
__asm__ volatile ("msr control, %%0" :: "r"(control));
}
"""
BOX_STATE = """
struct __box_state {
bool initialized;
uint32_t caller;
uint32_t lr;
uint32_t *sp;
};
"""
MPU_HANDLERS = """
struct __box_frame {
uint32_t *fp;
uint32_t lr;
uint32_t *sp;
uint32_t caller;
};
// forward declaration of fault wrapper, may be called directly
// in other handlers, but only in other handlers! (needs isr context)
uint64_t __box_faultsetup(int32_t err) {
// mark box as uninitialized
__box_state[__box_active]->initialized = false;
// invoke user handler, should not return
// TODO should we set this up to be called in non-isr context?
if (__box_aborts[__box_active]) {
__box_aborts[__box_active](err);
__builtin_unreachable();
}
struct __box_state *state = __box_state[__box_active];
struct __box_state *targetstate = __box_state[state->caller];
uint32_t targetlr = targetstate->lr;
uint32_t *targetsp = targetstate->sp;
struct __box_frame *targetbf = (struct __box_frame*)targetsp;
uint32_t *targetfp = targetbf->fp;
// in call?
if (!targetlr) {
// halt if we can't handle
__box_abort(-ELOOP);
}
// check if our return target supports erroring
uint32_t op = targetfp[6];
if (!(op & 2)) {
// halt if we can't handle
__box_abort(err);
}
// we can return an error
__box_active = state->caller;
targetstate->lr = targetbf->lr;
targetstate->sp = targetbf->sp;
targetstate->caller = targetbf->caller;
// select MPU regions
__box_mpu_switch(__box_mpuregions[__box_active]);
// copy return frame
targetfp[0] = err; // r0 = arg0
targetfp[1] = 0; // r1 = arg1
targetfp[2] = 0; // r2 = arg2
targetfp[3] = 0; // r3 = arg3
targetfp[6] = targetfp[5]; // pc = lr
return ((uint64_t)targetlr) | ((uint64_t)(uint32_t)targetsp << 32);
}
__attribute__((naked, noreturn))
void __box_faulthandler(int32_t err) {
__asm__ volatile (
// call into c with stack control
"bl __box_faultsetup \\n\\t"
// drop saved state
"add r1, r1, #4*4 \\n\\t"
// restore fp registers?
"tst r0, #0x10 \\n\\t"
"it eq \\n\\t"
"vldmiaeq r1!, {s16-s31} \\n\\t"
// restore core registers
"ldmia r1!, {r4-r11} \\n\\t"
// update sp
"tst r0, #0x4 \\n\\t"
"ite eq \\n\\t"
"msreq msp, r1 \\n\\t"
"msrne psp, r1 \\n\\t"
// return
"bx r0 \\n\\t"
::
"i"(__box_faultsetup)
);
}
uint64_t __box_callsetup(uint32_t lr, uint32_t *sp,
uint32_t op, uint32_t *fp) {
// save lr + sp
struct __box_state *state = __box_state[__box_active];
struct __box_frame *frame = (struct __box_frame*)sp;
frame->fp = fp;
frame->lr = state->lr;
frame->sp = state->sp;
frame->caller = state->caller;
state->lr = lr;
state->sp = sp;
uint32_t caller = __box_active;
__box_active = (caller == 0)
? (((op/4)-2) %% __BOX_COUNT) + 1
: 0;
uint32_t targetpc = (caller == 0)
? __box_jumptables[__box_active-1][((op/4)-2) / __BOX_COUNT + 1]
: __box_sys_jumptables[caller-1][((op/4)-2)];
struct __box_state *targetstate = __box_state[__box_active];
uint32_t targetlr = targetstate->lr;
uint32_t *targetsp = targetstate->sp;
// keep track of caller
targetstate->caller = caller;
// don't allow returns while executing
targetstate->lr = 0;
// need sp to fixup instruction aborts
targetstate->sp = targetsp;
// select MPU regions
__box_mpu_switch(__box_mpuregions[__box_active]);
// setup new call frame
targetsp -= 8;
targetsp[0] = fp[0]; // r0 = arg0
targetsp[1] = fp[1]; // r1 = arg1
targetsp[2] = fp[2]; // r2 = arg2
targetsp[3] = fp[3]; // r3 = arg3
targetsp[4] = fp[4]; // r12 = r12
targetsp[5] = (uint32_t)&__box_return; // lr = __box_return
targetsp[6] = targetpc; // pc = targetpc
targetsp[7] = fp[7]; // psr = psr
return ((uint64_t)targetlr) | ((uint64_t)(uint32_t)targetsp << 32);
}
__attribute__((naked))
void __box_callhandler(uint32_t lr, uint32_t *sp, uint32_t op) {
__asm__ volatile (
// keep track of args
"mov r3, r1 \\n\\t"
// save core registers
"stmdb r1!, {r4-r11} \\n\\t"
// save fp registers?
"tst r0, #0x10 \\n\\t"
"it eq \\n\\t"
"vstmdbeq r1!, {s16-s31} \\n\\t"
// make space to save state
"sub r1, r1, #4*4 \\n\\t"
// sp == msp?
"tst r0, #0x4 \\n\\t"
"it eq \\n\\t"
"moveq sp, r1 \\n\\t"
// ah! reserve a frame in case we're calling this
// interrupt's stack from another stack
"sub sp, sp, #8*4 \\n\\t"
// call into c now that we have stack control
"bl __box_callsetup \\n\\t"
// update new sp
"tst r0, #0x4 \\n\\t"
"itee eq \\n\\t"
"msreq msp, r1 \\n\\t"
"msrne psp, r1 \\n\\t"
// drop reserved frame?
"addne sp, sp, #8*4 \\n\\t"
// return to call
"bx r0 \\n\\t"
::
"i"(__box_callsetup)
);
}
uint64_t __box_returnsetup(uint32_t lr, uint32_t *sp,
uint32_t op, uint32_t *fp) {
// save lr + sp
struct __box_state *state = __box_state[__box_active];
// drop exception frame and fixup instruction aborts
sp = state->sp;
state->lr = lr;
state->sp = sp;
__box_active = state->caller;
struct __box_state *targetstate = __box_state[__box_active];
uint32_t targetlr = targetstate->lr;
// in call?
if (!targetlr) {
__box_faulthandler(-EFAULT);
__builtin_unreachable();
}
uint32_t *targetsp = targetstate->sp;
struct __box_frame *targetframe = (struct __box_frame*)targetsp;
uint32_t *targetfp = targetframe->fp;
targetstate->lr = targetframe->lr;
targetstate->sp = targetframe->sp;
targetstate->caller = targetframe->caller;
// select MPU regions
__box_mpu_switch(__box_mpuregions[__box_active]);
// copy return frame
targetfp[0] = fp[0]; // r0 = arg0
targetfp[1] = fp[1]; // r1 = arg1
targetfp[2] = fp[2]; // r2 = arg2
targetfp[3] = fp[3]; // r3 = arg3
targetfp[6] = targetfp[5]; // pc = lr
return ((uint64_t)targetlr) | ((uint64_t)(uint32_t)targetsp << 32);
}
__attribute__((naked, noreturn))
void __box_returnhandler(uint32_t lr, uint32_t *sp, uint32_t op) {
__asm__ volatile (
// keep track of rets
"mov r3, r1 \\n\\t"
// call into c now that we have stack control
"bl __box_returnsetup \\n\\t"
// drop saved state
"add r1, r1, #4*4 \\n\\t"
// restore fp registers?
"tst r0, #0x10 \\n\\t"
"it eq \\n\\t"
"vldmiaeq r1!, {s16-s31} \\n\\t"
// restore core registers
"ldmia r1!, {r4-r11} \\n\\t"
// update sp
"tst r0, #0x4 \\n\\t"
"ite eq \\n\\t"
"msreq msp, r1 \\n\\t"
"msrne psp, r1 \\n\\t"
// return
"bx r0 \\n\\t"
::
"i"(__box_returnsetup)
);
}
__attribute__((alias("__box_mpu_handler")))
void __box_usagefault_handler(void);
__attribute__((alias("__box_mpu_handler")))
void __box_busfault_handler(void);
__attribute__((alias("__box_mpu_handler")))
void __box_memmanage_handler(void);
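// note: the handler dispatches on how far the faulting pc landed inside
// __box_callregion: offset 0 is a return, offset 4 is an explicit abort
// with the error code taken from r0 of the exception frame, anything
// below the region size is a cross-box call, and everything else is
// treated as a real memory fault (-EFAULT)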
__attribute__((naked))
void __box_mpu_handler(void) {
__asm__ volatile (
// get lr
"mov r0, lr \\n\\t"
"tst r0, #0x4 \\n\\t"
// get sp
"ite eq \\n\\t"
"mrseq r1, msp \\n\\t"
"mrsne r1, psp \\n\\t"
// get pc
"ldr r2, [r1, #6*4] \\n\\t"
// check type of call
// return?
"ldr r3, =__box_callregion \\n\\t"
"subs r2, r2, r3 \\n\\t"
"beq __box_returnhandler \\n\\t"
// explicit abort?
"cmp r2, #4 \\n\\t"
"itt eq \\n\\t"
"ldreq r0, [r1, #0] \\n\\t"
"beq __box_faulthandler \\n\\t"
// call?
"ldr r3, =%(callsize)d \\n\\t"
"cmp r2, r3 \\n\\t"
"blo __box_callhandler \\n\\t"
// if we've reached here this is a true fault
"ldr r0, =%%[EFAULT] \\n\\t"
"b __box_faulthandler \\n\\t"
"b ."
::
"i"(__box_faulthandler),
"i"(__box_callhandler),
"i"(__box_returnhandler),
"i"(&__box_callregion),
[EFAULT]"i"(-EFAULT)
);
}
"""
@runtimes.runtime
class ARMv7MMPURuntime(
ErrorGlue,
WriteGlue,
AbortGlue,
HeapGlue,
runtimes.Runtime):
"""
A bento-box runtime that uses an Arm v7 MPU to provide memory isolation
between boxes.
"""
__argname__ = "armv7m_mpu"
__arghelp__ = __doc__
@classmethod
def __argparse__(cls, parser, **kwargs):
parser.add_argument('--mpu_regions', type=int,
help="Upper limit on the number of MPU regions to manage for "
"each box. Note the actual number of MPU regions will be "
"this plus one region for box calls. Defualts to 4.")
parser.add_nestedparser('--jumptable', Section)
parser.add_nestedparser('--call_region', Region)
parser.add_argument('--zero', type=bool,
help="Zero RAM before executing the box. This is useful if boxes "
"share RAM to avoid leaking data. This is not useful if the "
"box is the only box able to access its allocated RAM.")
def __init__(self, mpu_regions=None, jumptable=None,
call_region=None, zero=None):
super().__init__()
self._mpu_regions = mpu_regions if mpu_regions is not None else 4
self._jumptable = Section('jumptable', **jumptable.__dict__)
self._call_region = (Region(**call_region.__dict__)
if call_region.addr is not None else
None)
self._zero = zero or False
# overridable
def _box_call_region(self, parent):
callmemory = parent.bestmemory(
'rx', size=0, reverse=True).origmemory
addr = callmemory.addr + callmemory.size
# TODO fewer magic numbers here?
importcount = max(it.chain(
[3+sum(len(box.exports)
for box in parent.boxes
if box.runtime == self)],
(4+len(box.imports)
for box in parent.boxes
if box.runtime == self)))
# fit into MPU limits
size = 2**math.ceil(math.log2(4*importcount))
size = max(size, 32)
return Region(addr=addr, size=size)
# overridable
def _check_call_region(self, call_region):
assert math.log2(call_region.size) % 1 == 0, (
"%s: MPU call region not aligned to a power-of-two `%s`"
% (self.name, call_region))
assert call_region.addr % call_region.size == 0, (
"%s: MPU call region not aligned to size `%s`"
% (self.name, call_region))
assert call_region.size >= 32, (
"%s: MPU call region too small (< 32 bytes) `%s`"
% (self.name, call_region))
# overridable
def _check_mpu_region(self, memory):
assert math.log2(memory.size) % 1 == 0, (
"%s: Memory region `%s` not aligned to a power-of-two `%s`"
% (self.name, memory.name, memory))
assert memory.addr % memory.size == 0, (
"%s: Memory region `%s` not aligned to its size `%s`"
% (self.name, memory.name, memory))
assert memory.size >= 32, (
"%s: Memory region `%s` too small (< 32 bytes) `%s`"
% (self.name, memory.name, memory))
# overridable
def _build_mpu_impl(self, output, parent):
output.decls.append(MPU_IMPL)
# overridable
def _build_mpu_sysregions(self, output, parent):
out = output.decls.append()
out.printf('const struct __box_mpuregions __box_sys_mpuregions = {')
with out.pushindent():
out.printf('.control = 0,')
out.printf('.count = 0,')
out.printf('.regions = {}')
out.printf('};')
# overridable
def _build_mpu_regions(self, output, parent, box):
out = output.decls.append()
out.printf('const struct __box_mpuregions __box_%(box)s_mpuregions = {')
with out.pushindent():
out.printf('.control = 1,')
out.printf('.count = %(count)d,', count=len(box.memories))
out.printf('.regions = {')
with out.pushindent():
for memory in box.memories:
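# note: the generated RASR word uses the standard ARMv7-M PMSAv7 fields:
# bit 28 XN (set when the region is not executable), bits 26:24 AP
# (0b011 read/write vs 0b010 read-only), bits 5:1 SIZE (log2(size)-1),
# and bit 0 ENABLE; the commented-out 0x00080000 would set TEX[0]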
out.printf('{%(rbar)#010x, %(rasr)#010x},',
rbar=memory.addr,
rasr= (0x10000000
if 'x' not in memory.mode else
0x00000000)
| (0x03000000
if set('rw').issubset(memory.mode) else
0x02000000
if 'r' in memory.mode else
0x00000000)
| 0 #(0x00080000)
| ((int(math.log2(memory.size))-1) << 1)
| 1)
out.printf('},')
out.printf('};')
def box_parent_prologue(self, parent):
# we need these
parent.addexport('__box_memmanage_handler', 'fn() -> void',
scope=parent.name, source=self.__argname__)
parent.addexport('__box_busfault_handler', 'fn() -> void',
scope=parent.name, source=self.__argname__)
parent.addexport('__box_usagefault_handler', 'fn() -> void',
scope=parent.name, source=self.__argname__)
super().box_parent_prologue(parent)
# best effort call_region size
if self._call_region is None:
self._call_region = self._box_call_region(parent)
# check our call region
self._check_call_region(self._call_region)
parent.pushattrs(
mpuregions=self._mpu_regions,
callregion=self._call_region.addr,
callsize=self._call_region.size,
callmask=self._call_region.size-1,
calllog2=math.log2(self._call_region.size))
for box in parent.boxes:
if box.runtime == self:
box.pushattrs(
callregion=self._call_region.addr)
def box_parent(self, parent, box):
# register hooks
self._load_hook = parent.addimport(
'__box_%s_load' % box.name, 'fn() -> err',
scope=parent.name, source=self.__argname__,
doc="Called to load the box during init. Normally provided "
"by the loader but can be overriden.")
self._abort_hook = parent.addimport(
'__box_%s_abort' % box.name, 'fn(err err) -> noreturn',
scope=parent.name, source=self.__argname__, weak=True,
doc="Called when this box aborts, either due to an illegal "
"memory access or other failure. the error code is "
"provided as an argument.")
self._write_hook = parent.addimport(
'__box_%s_write' % box.name,
'fn(i32, const u8[size], usize size) -> errsize',
scope=parent.name, source=self.__argname__, weak=True,
doc="Override __box_write for this specific box.")
self._flush_hook = parent.addimport(
'__box_%s_flush' % box.name,
'fn(i32) -> err',
scope=parent.name, source=self.__argname__, weak=True,
doc="Override __box_flush for this specific box.")
super().box_parent(parent, box)
def box(self, box):
# need isolated stack
if not box.stack.size:
print("warning: Box `%s` has no stack!" % box.name)
# check memory regions against MPU limitations
for memory in box.memories:
self._check_mpu_region(memory)
super().box(box)
self._jumptable.alloc(box, 'rp')
box.stack.alloc(box, 'rw')
box.heap.alloc(box, 'rw')
# plugs
self._abort_plug = box.addexport(
'__box_abort', 'fn(err) -> noreturn',
scope=box.name, source=self.__argname__, weak=True)
self._write_plug = box.addexport(
'__box_write', 'fn(i32, const u8[size], usize size) -> errsize',
scope=box.name, source=self.__argname__, weak=True)
self._flush_plug = box.addexport(
'__box_flush', 'fn(i32) -> err',
scope=box.name, source=self.__argname__, weak=True)
# zeroing takes care of bss
if self._zero:
box.addexport('__box_bss_init', 'fn() -> void',
scope=box.name, source=self.__argname__, weak=True)
def _parentimports(self, parent, box):
"""
Get imports that need linking.
Yields import, needswrapper, needsinit.
"""
# implicit imports
yield Import(
'__box_%s_postinit' % box.name,
'fn(const u32*) -> err32',
source=self.__argname__), False, False
# imports that need linking
for import_ in parent.imports:
if import_.link and import_.link.export.box == box:
yield (import_.postbound(),
len(import_.boundargs) > 0 or box.init == 'lazy',
box.init == 'lazy')
def _parentexports(self, parent, box):
"""
Get exports that need linking.
Yields export, needswrapper.
"""
# implicit exports
yield Export(
'__box_%s_write' % box.name,
'fn(i32, const u8*, usize) -> errsize',
source=self.__argname__), False
yield Export(
'__box_%s_flush' % box.name,
'fn(i32) -> err',
source=self.__argname__), False
# exports that need linking
for export in parent.exports:
if any(link.import_.box == box for link in export.links):
yield export.prebound(), len(export.boundargs) > 0
def _imports(self, box):
"""
Get imports that need linking.
Yields import, needswrapper.
"""
# implicit imports
yield Import(
'__box_write',
'fn(i32, const u8[size], usize size) -> errsize',
source=self.__argname__), False
yield Import(
'__box_flush',
'fn(i32) -> err',
source=self.__argname__), False
# imports that need linking
for import_ in box.imports:
if import_.link and import_.link.export.box != box:
yield import_.postbound(), len(import_.boundargs) > 0
def _exports(self, box):
"""
Get exports that need linking.
Yields export, needswrapper
"""
# implicit exports
yield Export(
'__box_init', 'fn() -> err32',
source=self.__argname__), False
# exports that need linking
for export in box.exports:
if export.scope != box:
yield export.prebound(), len(export.boundargs) > 0
def build_mk(self, output, box):
# target rule
output.decls.insert(0, '%(name)-16s ?= %(target)s',
name='TARGET', target=output.get('target', '%(box)s.elf'))
out = output.rules.append(doc='target rule')
out.printf('$(TARGET): $(OBJ) $(CRATES) $(BOXES) $(LDSCRIPT)')
with out.indent():
out.printf('$(CC) $(OBJ) $(BOXES) $(LDFLAGS) -o $@')
super().build_mk(output, box)
def build_parent_c_prologue(self, output, parent):
super().build_parent_c_prologue(output, parent)
output.decls.append(MPU_STATE)
self._build_mpu_impl(output, parent)
output.decls.append('#define __BOX_COUNT %(boxcount)d',
boxcount=sum(1 for box in parent.boxes if box.runtime == self))
output.decls.append(BOX_STATE)
def build_parent_c(self, output, parent, box):
super().build_parent_c(output, parent, box)
out = output.decls.append()
out.printf('//// %(box)s state ////')
out.printf('struct __box_state __box_%(box)s_state;')
out.printf('extern uint32_t __box_%(box)s_jumptable[];')
self._build_mpu_regions(output, parent, box)
output.decls.append('//// %(box)s exports ////')
for import_, needsinit in ((import_, needsinit)
for import_, needswrapper, needsinit in
self._parentimports(parent, box)
if needswrapper):
out = output.decls.append(
fn=output.repr_fn(import_),
prebound=output.repr_fn(import_,
name='__box_import_%(alias)s',
attrs=['extern']),
alias=import_.alias)
out.printf('%(fn)s {')
with out.indent():
# inject lazy-init?
if needsinit:
out.printf('if (!__box_%(box)s_state.initialized) {')
with out.indent():
out.printf('int err = __box_%(box)s_init();')
out.printf('if (err) {')
with out.indent():
if import_.isfalible():
out.printf('return err;')
else:
out.printf('__box_abort(err);')
out.printf('}')
out.printf('}')
out.printf()
# jump to real import
out.printf('%(prebound)s;')
out.printf('%(return_)s__box_import_%(alias)s(%(args)s);',
return_=('return ' if import_.rets else ''),
args=', '.join(map(str, import_.argnamesandbounds())))
out.printf('}')
output.decls.append('//// %(box)s imports ////')
# redirect hooks if necessary
if not self._write_hook.link:
out = output.decls.append(
write_hook=self._write_hook.name,
doc='redirect %(write_hook)s -> __box_write')
out.printf('#define %(write_hook)s __box_write')
if not self._flush_hook.link:
out = output.decls.append(
flush_hook=self._flush_hook.name,
doc='redirect %(flush_hook)s -> __box_flush')
out.printf('#define %(flush_hook)s __box_flush')
# wrappers?
for export in (export
for export, needswrapper in self._parentexports(parent, box)
if needswrapper):
out = output.decls.append(
fn=output.repr_fn(
export.postbound(),
name='__box_%(box)s_export_%(alias)s'),
alias=export.alias)
out.printf('%(fn)s {')
with out.indent():
out.printf('%(return_)s%(alias)s(%(args)s);',
return_='return ' if export.rets else '',
args=', '.join(map(str, export.argnamesandbounds())))
out.printf('}')
# import jumptable
out = output.decls.append()
out.printf('const uint32_t __box_%(box)s_sys_jumptable[] = {')
with out.indent():
for export, needswrapper in self._parentexports(parent, box):
out.printf('(uint32_t)%(prefix)s%(alias)s,',
prefix='__box_%(box)s_export_' if needswrapper else '',
alias=export.alias)
out.printf('};')
# init
output.decls.append('//// %(box)s init ////')
out = output.decls.append()
out.printf('int __box_%(box)s_init(void) {')
with out.indent():
out.printf('int err;')
out.printf('if (__box_%(box)s_state.initialized) {')
with out.indent():
out.printf('return 0;')
out.printf('}')
out.printf()
if box.roommates:
out.printf('// bring down any overlapping boxes')
for i, roommate in enumerate(box.roommates):
with out.pushattrs(roommate=roommate.name):
out.printf('extern int __box_%(roommate)s_clobber(void);')
out.printf('err = __box_%(roommate)s_clobber();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('// make sure that the MPU is initialized')
out.printf('err = __box_mpu_init();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('// prepare the box\'s stack')
out.printf('// must use PSP, otherwise boxes could '
'overflow the ISR stack')
out.printf('__box_%(box)s_state.lr = '
'0xfffffffd; // TODO determine fp?')
out.printf('__box_%(box)s_state.sp = '
'(void*)__box_%(box)s_jumptable[0];')
out.printf()
if self._zero:
out.printf('// zero memory')
for memory in box.memoryslices:
if 'w' in memory.mode:
with out.pushattrs(
memory=memory.name,
memorystart='__box_%(box)s_%(memory)s_start',
memoryend='__box_%(box)s_%(memory)s_end'):
out.printf('extern uint8_t %(memorystart)s;')
out.printf('extern uint8_t %(memoryend)s;')
out.printf('memset(&%(memorystart)s, 0, '
'&%(memoryend)s - &%(memorystart)s);')
out.printf()
out.printf('// load the box if unloaded')
out.printf('err = __box_%(box)s_load();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('// call box\'s init')
out.printf('extern int __box_%(box)s_postinit(void);')
out.printf('err = __box_%(box)s_postinit();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('__box_%(box)s_state.initialized = true;')
out.printf('return 0;')
out.printf('}')
out = output.decls.append()
out.printf('int __box_%(box)s_clobber(void) {')
with out.indent():
out.printf('__box_%(box)s_state.initialized = false;')
out.printf('return 0;')
out.printf('}')
# stack manipulation
output.includes.append('<assert.h>')
out = output.decls.append(
memory=box.stack.memory.name)
out.printf('void *__box_%(box)s_push(size_t size) {')
with out.indent():
out.printf('size = (size+3)/4;')
out.printf('extern uint8_t __box_%(box)s_%(memory)s_start;')
out.printf('if (__box_%(box)s_state.sp - size '
'< (uint32_t*)&__box_%(box)s_%(memory)s_start) {')
with out.indent():
out.printf('return NULL;')
out.printf('}')
out.printf()
out.printf('__box_%(box)s_state.sp -= size;')
out.printf('return __box_%(box)s_state.sp;')
out.printf('}')
out = output.decls.append(
memory=box.stack.memory.name)
out.printf('void __box_%(box)s_pop(size_t size) {')
with out.indent():
out.printf('size = (size+3)/4;')
out.printf('__attribute__((unused))')
out.printf('extern uint8_t __box_%(box)s_%(memory)s_end;')
out.printf('assert(__box_%(box)s_state.sp + size '
'<= (uint32_t*)&__box_%(box)s_%(memory)s_end);')
out.printf('__box_%(box)s_state.sp += size;')
out.printf('}')
def build_parent_c_epilogue(self, output, parent):
super().build_parent_c_epilogue(output, parent)
# state
output.decls.append('struct __box_state __box_sys_state;')
out = output.decls.append()
out.printf('struct __box_state *const __box_state[__BOX_COUNT+1] = {')
with out.indent():
out.printf('&__box_sys_state,')
for box in parent.boxes:
if box.runtime == self:
out.printf('&__box_%(box)s_state,', box=box.name)
out.printf('};')
# abort hooks
out = output.decls.append()
out.printf('void (*const __box_aborts[])(int err) = {')
with out.indent():
out.printf('NULL,')
for box in parent.boxes:
if box.runtime == self:
if box.runtime._abort_hook.link:
out.printf(box.runtime._abort_hook.link.import_.alias)
else:
out.printf('NULL,')
out.printf('};')
# mpu regions
self._build_mpu_sysregions(output, parent)
out = output.decls.append()
out.printf('const struct __box_mpuregions *const '
'__box_mpuregions[__BOX_COUNT+1] = {')
with out.pushindent():
out.printf('&__box_sys_mpuregions,')
for box in parent.boxes:
if box.runtime == self:
out.printf('&__box_%(box)s_mpuregions,', box=box.name)
out.printf('};')
# jumptables
out = output.decls.append()
out.printf('const uint32_t *const '
'__box_jumptables[__BOX_COUNT] = {')
with out.pushindent():
for box in parent.boxes:
if box.runtime == self:
out.printf('__box_%(box)s_jumptable,',
box=box.name)
out.printf('};')
out = output.decls.append()
out.printf('const uint32_t *const '
'__box_sys_jumptables[__BOX_COUNT] = {')
with out.pushindent():
for box in parent.boxes:
if box.runtime == self:
out.printf('__box_%(box)s_sys_jumptable,',
box=box.name)
out.printf('};')
# mpu handlers
output.decls.append(MPU_HANDLERS)
def build_parent_ld(self, output, parent, box):
super().build_parent_ld(output, parent, box)
output.decls.append('__box_%(box)s_jumptable = '
'__box_%(box)s_%(memory)s_start;',
memory=self._jumptable.memory.name,
doc='box %(box)s jumptable')
def build_parent_ld_epilogue(self, output, parent):
super().build_parent_ld_epilogue(output, parent)
out = output.decls.append(doc='call region')
out.printf('__box_callregion = %(callregion)#0.8x;')
out.printf('__box_return = __box_callregion;')
# create box calls for imports
boxcount = sum(1 for box in parent.boxes if box.runtime == self)
out = output.decls.append(doc='box calls')
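# note: each import is linked to a unique thumb address inside
# __box_callregion: slots are 4 bytes apart, the first two are reserved
# for __box_return and __box_abort, bit 1 flags the import as fallible,
# and bit 0 keeps the thumb bit set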
for i, box in enumerate(box
for box in parent.boxes
if box.runtime == self):
for j, (import_, needswrapper, _) in enumerate(
self._parentimports(parent, box)):
out.printf('%(import_)-24s = __box_callregion + '
'4*(2 + %(boxcount)d*%(j)d + %(i)d) + 2*%(falible)d + 1;',
import_='__box_import_'+import_.alias
if needswrapper else
import_.alias,
falible=import_.isfalible(),
i=i,
j=j,
boxcount=boxcount)
def build_c(self, output, box):
super().build_c(output, box)
out = output.decls.append()
out.printf('int __box_init(void) {')
with out.indent():
if self.data_init_hook.link:
out.printf('// data inited by %(hook)s',
hook=self.data_init_hook.link.export.source)
out.printf()
else:
out.printf('// load data')
out.printf('extern uint32_t __data_init_start;')
out.printf('extern uint32_t __data_start;')
out.printf('extern uint32_t __data_end;')
out.printf('const uint32_t *s = &__data_init_start;')
out.printf('for (uint32_t *d = &__data_start; '
'd < &__data_end; d++) {')
with out.indent():
out.printf('*d = *s++;')
out.printf('}')
out.printf()
if self.bss_init_hook.link:
out.printf('// bss inited by %(hook)s',
hook=self.bss_init_hook.link.export.source)
out.printf()
else:
out.printf('// zero bss')
out.printf('extern uint32_t __bss_start;')
out.printf('extern uint32_t __bss_end;')
out.printf('for (uint32_t *d = &__bss_start; '
'd < &__bss_end; d++) {')
with out.indent():
out.printf('*d = 0;')
out.printf('}')
out.printf()
out.printf('// init libc')
out.printf('extern void __libc_init_array(void);')
out.printf('__libc_init_array();')
out.printf()
out.printf('return 0;')
out.printf('}')
output.decls.append('//// imports ////')
for import_ in (import_
for import_, needswrapper in self._imports(box)
if needswrapper):
out = output.decls.append(
fn=output.repr_fn(import_),
prebound=output.repr_fn(import_,
name='__box_import_%(alias)s',
attrs=['extern']),
alias=import_.alias)
out.printf('%(fn)s {')
with out.indent():
out.printf('%(prebound)s;')
out.printf('%(return_)s__box_export_%(alias)s(%(args)s);',
return_='return ' if import_.rets else '',
args=', '.join(map(str, import_.argnamesandbounds())))
out.printf('}')
output.decls.append('//// exports ////')
for export in (export
for export, needswrapper in self._exports(box)
if needswrapper):
out = output.decls.append(
fn=output.repr_fn(
export.postbound(),
name='__box_export_%(alias)s'),
alias=export.alias)
out.printf('%(fn)s {')
with out.indent():
out.printf('%(return_)s%(alias)s(%(args)s);',
return_='return ' if export.rets else '',
args=', '.join(map(str, export.argnamesandbounds())))
out.printf('}')
out = output.decls.append(doc='box-side jumptable')
out.printf('extern uint8_t __stack_end;')
out.printf('__attribute__((used, section(".jumptable")))')
out.printf('const uint32_t __box_jumptable[] = {')
with out.pushindent():
if box.stack.size > 0:
out.printf('(uint32_t)&__stack_end,')
for export, needswrapper in self._exports(box):
out.printf('(uint32_t)%(prefix)s%(alias)s,',
prefix='__box_export_' if needswrapper else '',
alias=export.alias)
out.printf('};')
def build_ld(self, output, box):
output.decls.append('__box_callregion = %(callregion)#0.8x;')
# create box calls for imports
out = output.decls.append(doc='box calls')
out.printf('%(import_)-24s = __box_callregion + '
'4*%(i)d + 2*%(falible)d + 1;',
import_='__box_abort',
falible=False,
i=1)
for i, (import_, needswrapper) in enumerate(self._imports(box)):
out.printf('%(import_)-24s = __box_callregion + '
'4*%(i)d + 2*%(falible)d + 1;',
import_='__box_import_' + import_.alias
if needswrapper else
import_.alias,
falible=import_.isfalible(),
i=2+i)
if not output.no_sections:
out = output.sections.append(
section='.jumptable',
memory=self._jumptable.memory.name)
out.printf('. = ALIGN(%(align)d);')
out.printf('__jumptable_start = .;')
out.printf('%(section)s . : {')
with out.pushindent():
out.printf('__jumptable = .;')
out.printf('KEEP(*(.jumptable))')
out.printf('} > %(MEMORY)s')
out.printf('. = ALIGN(%(align)d);')
out.printf('__jumptable_end = .;')
super().build_ld(output, box)
|
py | 7dfbbee6a4bdfcd5c23a5995a13ca717d14670e7 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPep8Naming(PythonPackage):
"""Check PEP-8 naming conventions, plugin for flake8."""
homepage = "https://pypi.org/project/pep8-naming/"
url = "https://files.pythonhosted.org/packages/3e/4a/125425d6b1e017f48dfc9c961f4bb9510168db7a090618906c750184ed03/pep8-naming-0.7.0.tar.gz"
extends('python', ignore='bin/(flake8|pyflakes|pycodestyle)')
version('0.7.0', '624258e0dd06ef32a9daf3c36cc925ff7314da7233209c5b01f7e5cdd3c34826')
depends_on('py-flake8-polyfill', type='run')
|
py | 7dfbbfac1aaa622424d3fe2a8596bb0b7aed08fd | import math
from numba import *
from experiments.elasticscatter.kernels import cuda_k_to_ij
__author__ = 'christopher'
@cuda.jit(argtypes=[f4[:, :], f4[:, :], i4])
def get_normalization_array(norm_array, scat, offset):
"""
Generate the Q-dependent normalization factors for the F(Q) array
Parameters
----------
norm_array: N x qmax_bin array
Normalization array
scat: NxM array
The scatter factor array
offset: int
The number of previously calculated atom pairs
"""
# snorm = cuda.shared.array((1, 64), f4)
snormi = cuda.shared.array((1, 64), f4)
snormj = cuda.shared.array((1, 64), f4)
k, qx = cuda.grid(2)
n = norm_array.shape[0]
qmax_bin = norm_array.shape[1]
if k >= n or qx >= qmax_bin:
return
tid = cuda.threadIdx.y
i, j = cuda_k_to_ij(int32(k + offset))
snormi[0, tid] = scat[i, qx]
snormj[0, tid] = scat[j, qx]
cuda.syncthreads()
snormi[0, tid] *= snormj[0, tid]
cuda.syncthreads()
# norm_array[k, qx] = scat[i, qx] * scat[j, qx]
norm_array[k, qx] = snormi[0, tid]
# A[k, q] = norm*Q, B[k, q] = cos(Q*r), C[k, w] = d/r/r
# D[k, q] = A*B - F(Q)
# E[k, w, q] = D * C
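# Editor's sketch (not part of the original kernels, added for clarity):
# a plain-NumPy reference of the A..E pipeline above, handy for checking
# the CUDA kernels on small inputs. Shapes follow the kernels: norm and
# fq are (k, qmax_bin), r is (k,), d is the (k, 3) displacement array.
def _grad_fq_numpy_reference(norm, fq, r, d, qbin):
    import numpy as np
    q = np.arange(norm.shape[1]) * qbin
    a = norm * q                                   # A[k, q] = norm * Q
    b = np.cos(q * r[:, None])                     # B[k, q] = cos(Q * r)
    c = d / r[:, None] ** 2                        # C[k, w] = d / r / r
    e = (a * b - fq)[:, None, :] * c[:, :, None]   # E[k, w, q] = D * C
    return e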
@cuda.jit(argtypes=[f4[:, :], f4[:, :], f4])
def get_grad_fq_a(a, norm, qbin):
k, qx = cuda.grid(2)
if k >= len(a) or qx >= a.shape[1]:
return
a[k, qx] = norm[k, qx] * float32(qx * qbin)
@cuda.jit(argtypes=[f4[:, :], f4[:], f4])
def get_grad_fq_b(b, r, qbin):
k, qx = cuda.grid(2)
if k >= len(b) or qx >= b.shape[1]:
return
b[k, qx] = math.cos(float32(qx * qbin) * r[k])
@cuda.jit(argtypes=[f4[:], f4[:, :]])
def get_grad_fq_c(r, d):
k = cuda.grid(1)
if k >= len(r):
return
for w in range(3):
d[k, w] /= r[k] ** 2
# @cuda.jit(argtypes=[f4[:, :], f4[:, :], f4[:, :], f4[:, :]])
@cuda.jit(argtypes=[f4[:, :], f4[:, :], f4[:, :]])
def get_grad_fq_d(a, b, fq):
k, qx = cuda.grid(2)
if k >= len(a) or qx >= a.shape[1]:
return
# D[k, qx] = a[k, qx] * b[k, qx] - fq[k, qx]
a[k, qx] *= b[k, qx]
a[k, qx] -= fq[k, qx]
@cuda.jit(argtypes=[f4[:, :, :], f4[:, :], f4[:, :]])
def get_grad_fq_e(e, d, c):
k, qx = cuda.grid(2)
if k >= len(e) or qx >= e.shape[2]:
return
for w in range(3):
e[k, w, qx] = d[k, qx] * c[k, w]
@cuda.jit(argtypes=[f4[:, :, :], f4[:, :, :], i4])
def experimental_sum_grad_fq2(new_grad, grad, k_cov):
k, qx = cuda.grid(2)
if k >= len(grad) or qx >= grad.shape[2]:
return
i, j = cuda_k_to_ij(i4(k + k_cov))
for tz in range(3):
cuda.atomic.add(new_grad, (j, tz, qx), 1)
# new_grad[i, tz, qx] = j
# cuda.atomic.add(new_grad, (i, tz, qx), j)
@cuda.jit(argtypes=[f4[:, :, :], f4[:, :, :], i4])
def experimental_sum_grad_fq3(new_grad, grad, k_cov):
k, qx = cuda.grid(2)
if k >= len(grad) or qx >= grad.shape[2]:
return
i, j = cuda_k_to_ij(i4(k + k_cov))
# for tz in range(3):
# new_grad[i, tz, qx] -= grad[k, tz, qx]
# new_grad[j, tz, qx] += grad[k, tz, qx]
# cuda.atomic.add(new_grad, (i, 0, qx), grad[k, 0, qx] * -1)
# cuda.atomic.add(new_grad, (j, 0, qx), grad[k, 0, qx] * 1)
#
# cuda.atomic.add(new_grad, (i, 1, qx), grad[k, 1, qx] * -1)
# cuda.atomic.add(new_grad, (j, 1, qx), grad[k, 1, qx] * 1)
#
# cuda.atomic.add(new_grad, (i, 2, qx), grad[k, 2, qx] * -1)
# cuda.atomic.add(new_grad, (j, 2, qx), grad[k, 2, qx] * 1)
new_grad[i, 0, qx] -= grad[k, 0, qx]
new_grad[j, 0, qx] += grad[k, 0, qx]
new_grad[i, 1, qx] -= grad[k, 1, qx]
new_grad[j, 1, qx] += grad[k, 1, qx]
new_grad[i, 2, qx] -= grad[k, 2, qx]
new_grad[j, 2, qx] += grad[k, 2, qx]
|
py | 7dfbc06b903f07686abd05848a98859928dd3503 | import datetime
from itertools import count
import random
import string
import time
import names
from .. import models
__all__ = ['fixture_random']
ORDINARY_USERID = "42"
ADMIN_USERID = "666"
def date_to_timestamp(d):
return int(time.mktime(d.timetuple()))
def random_text(n=100):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
def random_date(start=None, end=None):
"""Get a random date between two dates"""
if start is None and end is None:
end = datetime.datetime.now()
start = end - datetime.timedelta(days=365)
stime = date_to_timestamp(start)
etime = date_to_timestamp(end)
ptime = stime + random.random() * (etime - stime)
return datetime.date.fromtimestamp(ptime)
def fixture_random():
code = count(24400)
# patients
p = models.Patient(id=str(code.next()))
# name with accent
p.name = u'John Heyder Oliveira de Medeiros Galv\xe3o'
p.blood_type = random.choice(models.blood_types)
p.type_ = random.choice(models.patient_types)
p.put()
keys = [p.key]
for _ in range(5):
p = models.Patient(id=str(code.next()))
p.name = names.get_full_name()
p.blood_type = random.choice(models.blood_types)
p.type_ = random.choice(models.patient_types)
p.put()
keys.append(p.key)
# transfusions
for _ in range(40):
tr = models.Transfusion(id=str(code.next()))
tr.patient = random.choice(keys)
tr.date = random_date()
tr.local = random.choice(models.valid_locals)
tr.text = random_text()
tr.bags = []
for _ in range(2):
bag = models.BloodBag()
bag.type_ = random.choice(models.blood_types)
bag.content = random.choice(models.blood_contents)
tr.bags.append(bag)
if random.choice((True, False)):
tr.tags = ['naovisitado']
else:
if random.choice((True, False)):
tr.tags.append('rt')
else:
tr.tags.append('semrt')
tr.put()
# users
# admin user
u = models.UserPrefs(id=ADMIN_USERID, name='admin',
email="[email protected]", admin=True, authorized=True)
u.put()
# ordinary user1
u = models.UserPrefs(id=ORDINARY_USERID, name="user",
email="[email protected]", admin=False, authorized=True)
u.put()
# ordinary user1
u = models.UserPrefs(id=ORDINARY_USERID * 2, name="user2",
email="[email protected]", admin=False, authorized=True)
u.put()
|
py | 7dfbc0f0f6f765da00ab17f45bebe96538e917b8 | import io
from bertlv import (
tree_from_binary,
tree_from_xml,
tree_to_binary,
tree_to_text,
tree_to_xml,
)
def test_tree_from_binary_with_bytes(tlv_data_binary, tlv_tree):
assert tree_from_binary(tlv_data_binary) == tlv_tree
tree_from_binary(bytearray(tlv_data_binary))
# noinspection PyTypeChecker
tree_from_binary(memoryview(tlv_data_binary))
def test_tree_from_binary_with_stream(tlv_data_binary, tlv_tree):
stream = io.BytesIO(tlv_data_binary)
assert tree_from_binary(stream) == tlv_tree
def test_tree_from_binary_with_file(tlv_file_binary, tlv_tree):
with tlv_file_binary.open("rb") as file:
assert tree_from_binary(file) == tlv_tree
def test_tree_from_xml_with_bytes(tlv_data_xml, tlv_tree):
assert tree_from_xml(tlv_data_xml) == tlv_tree
tree_from_xml(bytearray(tlv_data_xml))
# noinspection PyTypeChecker
tree_from_xml(memoryview(tlv_data_xml))
def test_tree_from_xml_with_stream(tlv_data_xml, tlv_tree):
stream = io.BytesIO(tlv_data_xml)
assert tree_from_xml(stream) == tlv_tree
def test_tree_from_xml_with_file(tlv_file_xml, tlv_tree):
with tlv_file_xml.open("rb") as file:
assert tree_from_xml(file) == tlv_tree
def test_tree_to_binary_with_bytes(tlv_data_binary, tlv_tree):
assert tree_to_binary(tlv_tree) == tlv_data_binary
def test_tree_to_binary_with_stream(tlv_data_binary, tlv_tree):
stream = io.BytesIO()
tree_to_binary(tlv_tree, stream)
assert stream.getvalue() == tlv_data_binary
def test_tree_to_binary_with_file(tlv_file_binary, tlv_tree, tmp_path):
path = tmp_path / "test.tlv"
with path.open("wb") as file:
tree_to_binary(tlv_tree, file)
assert path.read_bytes() == tlv_file_binary.read_bytes()
def test_tree_to_text_with_bytes(tlv_data_text, tlv_tree):
assert tree_to_text(tlv_tree) == tlv_data_text
def test_tree_to_text_with_stream(tlv_data_text, tlv_tree):
stream = io.BytesIO()
tree_to_text(tlv_tree, stream)
assert stream.getvalue() == tlv_data_text
def test_tree_to_text_with_file(tlv_file_text, tlv_tree, tmp_path):
path = tmp_path / "test.txt"
with path.open("wb") as file:
tree_to_text(tlv_tree, file)
assert path.read_bytes() == tlv_file_text.read_bytes()
def test_tree_to_xml_with_bytes(tlv_data_xml, tlv_tree):
assert tree_to_xml(tlv_tree) == tlv_data_xml
def test_tree_to_xml_with_stream(tlv_data_xml, tlv_tree):
stream = io.BytesIO()
tree_to_xml(tlv_tree, stream)
assert stream.getvalue() == tlv_data_xml
def test_tree_to_xml_with_file(tlv_file_xml, tlv_tree, tmp_path):
path = tmp_path / "test.xml"
with path.open("wb") as file:
tree_to_xml(tlv_tree, file)
assert path.read_bytes() == tlv_file_xml.read_bytes()
|
py | 7dfbc10c64191a95076aade68d7132608023a534 | # Copyright 2014 OpenStack Foundation
# Copyright 2015 Chuck Fouts
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import warnings
from manilaclient.v2 import services
warnings.warn("Module manilaclient.v1.services is deprecated (taken as "
"a basis for manilaclient.v2.services). "
"The preferable way to get a client class or object is to use "
"the manilaclient.client module.")
class MovedModule(object):
def __init__(self, new_module):
self.new_module = new_module
def __getattr__(self, attr):
return getattr(self.new_module, attr)
sys.modules["manilaclient.v1.services"] = MovedModule(services)
|
py | 7dfbc2355ef75258cb1385ec5737485166c2ab88 | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import CreateIncident
|
py | 7dfbc2989012de8791686779ea79bffe84a124e7 | import sys
import gym
from dqn import Agent
num_episodes = 20
env_name = sys.argv[1] if len(sys.argv) > 1 else "MsPacman-v0"
env = gym.make(env_name)
agent = Agent(state_size=env.observation_space.shape,
number_of_actions=env.action_space.n,
save_name=env_name)
for e in xrange(num_episodes):
observation = env.reset()
done = False
agent.new_episode()
total_cost = 0.0
total_reward = 0.0
frame = 0
while not done:
frame += 1
#env.render()
action, values = agent.act(observation)
#action = env.action_space.sample()
observation, reward, done, info = env.step(action)
total_cost += agent.observe(reward)
total_reward += reward
print "total reward", total_reward
print "mean cost", total_cost/frame
|
py | 7dfbc29e33e204afe7209688b803484fbe16aa2a | """
WSGI config for controle_financeiro project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'controle_financeiro.settings.local')
application = get_wsgi_application()
|
py | 7dfbc2b47a8f8b91132330d326732f639a572224 | import scopyon
import numpy.random
config = scopyon.DefaultConfiguration()
config.default.detector.exposure_time = 33.0e-3
pixel_length = config.default.detector.pixel_length / config.default.magnification
L_2 = config.default.detector.image_size[0] * pixel_length * 0.5
rng = numpy.random.RandomState(123)
N = 100
inputs = rng.uniform(-L_2, +L_2, size=(N, 2))
img = scopyon.form_image(inputs, config=config, rng=rng)
img.save("tirf_000.png")
|
py | 7dfbc343fc97633a793eeeba736032941bdbd877 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Update roles for a user."""
import argparse
import math
import random
from google.cloud import datastore
import common.service_account as sa
DEFAULT_PROJECT = 'eclipse-2017-test'
INVALID_USER = '-1'
def get_arguments():
parser = argparse.ArgumentParser(description='Add a set of fake users.')
parser.add_argument('--project_id', type=str, default=DEFAULT_PROJECT,
help = 'Project ID to add users to')
parser.add_argument('--count', nargs=1, type=int, default = 0,
help = 'Number of fake users to add')
return parser.parse_args()
def get_polygon():
eclipse_points = [[45.27, -123.44], [45.2, -121.84], [45.11, -120.31], [45.01, -118.82], [44.89, -117.39], [44.76, -116.0], [44.61, -114.65], [44.45, -113.34], [44.29, -112.08], [44.11, -110.84], [43.92, -109.64], [43.73, -108.47], [43.52, -107.33], [43.31, -106.22], [43.09, -105.13], [42.85, -104.07], [42.62, -103.03], [42.38, -102.02], [42.12, -101.02], [41.87, -100.04], [41.6, -99.09], [41.33, -98.15], [41.06, -97.23], [40.78, -96.32], [40.49, -95.43], [40.2, -94.56], [39.9, -93.69], [39.6, -92.84], [39.29, -92.01], [38.98, -91.18], [38.66, -90.37], [38.34, -89.56], [38.02, -88.77], [37.69, -87.98], [37.35, -87.2], [37.01, -86.43], [36.67, -85.67], [36.32, -84.91], [35.97, -84.16], [35.61, -83.41], [35.25, -82.67], [34.89, -81.93], [34.52, -81.19], [34.15, -80.46], [32.91, -80.43], [33.28, -81.15], [33.65, -81.88], [34.01, -82.6], [34.37, -83.33], [34.72, -84.06], [35.07, -84.79], [35.41, -85.53], [35.76, -86.27], [36.09, -87.02], [36.42, -87.78], [36.75, -88.54], [37.08, -89.31], [37.4, -90.09], [37.71, -90.88], [38.02, -91.67], [38.33, -92.48], [38.63, -93.29], [38.93, -94.12], [39.22, -94.96], [39.51, -95.82], [39.79, -96.68], [40.07, -97.56], [40.34, -98.46], [40.61, -99.37], [40.87, -100.3], [41.13, -101.25], [41.38, -102.22], [41.62, -103.2], [41.86, -104.21], [42.09, -105.24], [42.31, -106.3], [42.52, -107.38], [42.73, -108.48], [42.93, -109.62], [43.12, -110.78], [43.3, -111.97], [43.48, -113.2], [43.64, -114.47], [43.79, -115.77], [43.93, -117.12], [44.05, -118.51], [44.17, -119.94], [44.27, -121.43], [44.35, -122.98]]
eclipse_poly = [ {'lat': point[0], 'lng': point[1]} for point in eclipse_points ]
return eclipse_poly
def create_user(client, user_id, eclipse_poly):
user_key = client.key("User", user_id)
print user_key.name
user = datastore.Entity(key = user_key)
user['name'] = u"Test User " + user_id
user['email'] = u"test" + user_id + u"@example.com"
# Get random location.
point = eclipse_poly[random.randint(0, len(eclipse_poly) - 1)]
u = float(random.uniform(-1.0, 1.0))
v = float(random.uniform(-1.0, 1.0))
user['geocoded_location'] = [point['lat'] + u, point['lng'] + v]
print point
print u
print v
print user['geocoded_location']
client.put(user)
user_role_key = client.key("UserRole", user_id)
user_role = datastore.Entity(key = user_role_key)
user_role['roles'] = [u"user"]
# make some of them volunteers.
if float(random.uniform(0.0, 1.0)) > 0.8:
user_role['roles'].append(u"volunteer")
client.put(user_role)
return user
def main():
args = get_arguments()
eclipse_poly = get_polygon()
client = datastore.Client(args.project_id)
for x in range(0, args.count[0]):
user = create_user(client, str(random.randint(1, 10000000)), eclipse_poly)
if __name__ == '__main__':
main()
|
py | 7dfbc46885b430d8598808403d5208cdb75ee2ef | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'GetComponentResult',
'AwaitableGetComponentResult',
'get_component',
]
@pulumi.output_type
class GetComponentResult:
"""
A collection of values returned by getComponent.
"""
def __init__(__self__, arn=None, change_description=None, data=None, date_created=None, description=None, encrypted=None, id=None, kms_key_id=None, name=None, owner=None, platform=None, supported_os_versions=None, tags=None, type=None, version=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if change_description and not isinstance(change_description, str):
raise TypeError("Expected argument 'change_description' to be a str")
pulumi.set(__self__, "change_description", change_description)
if data and not isinstance(data, str):
raise TypeError("Expected argument 'data' to be a str")
pulumi.set(__self__, "data", data)
if date_created and not isinstance(date_created, str):
raise TypeError("Expected argument 'date_created' to be a str")
pulumi.set(__self__, "date_created", date_created)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if encrypted and not isinstance(encrypted, bool):
raise TypeError("Expected argument 'encrypted' to be a bool")
pulumi.set(__self__, "encrypted", encrypted)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if platform and not isinstance(platform, str):
raise TypeError("Expected argument 'platform' to be a str")
pulumi.set(__self__, "platform", platform)
if supported_os_versions and not isinstance(supported_os_versions, list):
raise TypeError("Expected argument 'supported_os_versions' to be a list")
pulumi.set(__self__, "supported_os_versions", supported_os_versions)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="changeDescription")
def change_description(self) -> str:
"""
Change description of the component.
"""
return pulumi.get(self, "change_description")
@property
@pulumi.getter
def data(self) -> str:
"""
Data of the component.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> str:
"""
Date the component was created.
"""
return pulumi.get(self, "date_created")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the component.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def encrypted(self) -> bool:
"""
Encryption status of the component.
"""
return pulumi.get(self, "encrypted")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> str:
"""
Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the component.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def owner(self) -> str:
"""
Owner of the component.
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter
def platform(self) -> str:
"""
Platform of the component.
"""
return pulumi.get(self, "platform")
@property
@pulumi.getter(name="supportedOsVersions")
def supported_os_versions(self) -> Sequence[str]:
"""
Operating Systems (OSes) supported by the component.
"""
return pulumi.get(self, "supported_os_versions")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Key-value map of resource tags for the component.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the component.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the component.
"""
return pulumi.get(self, "version")
class AwaitableGetComponentResult(GetComponentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetComponentResult(
arn=self.arn,
change_description=self.change_description,
data=self.data,
date_created=self.date_created,
description=self.description,
encrypted=self.encrypted,
id=self.id,
kms_key_id=self.kms_key_id,
name=self.name,
owner=self.owner,
platform=self.platform,
supported_os_versions=self.supported_os_versions,
tags=self.tags,
type=self.type,
version=self.version)
def get_component(arn: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComponentResult:
"""
Provides details about an Image Builder Component.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.get_component(arn="arn:aws:imagebuilder:us-west-2:aws:component/amazon-cloudwatch-agent-linux/1.0.0")
```
:param str arn: Amazon Resource Name (ARN) of the component.
:param Mapping[str, str] tags: Key-value map of resource tags for the component.
"""
__args__ = dict()
__args__['arn'] = arn
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:imagebuilder/getComponent:getComponent', __args__, opts=opts, typ=GetComponentResult).value
return AwaitableGetComponentResult(
arn=__ret__.arn,
change_description=__ret__.change_description,
data=__ret__.data,
date_created=__ret__.date_created,
description=__ret__.description,
encrypted=__ret__.encrypted,
id=__ret__.id,
kms_key_id=__ret__.kms_key_id,
name=__ret__.name,
owner=__ret__.owner,
platform=__ret__.platform,
supported_os_versions=__ret__.supported_os_versions,
tags=__ret__.tags,
type=__ret__.type,
version=__ret__.version)
|
py | 7dfbc5d80e230c2086c0910d381c846a0893ab1c | from moxom.helper import Helper
from moxom.runtime_context import create_routines, Routine
import unittest
import sys
from io import StringIO
from typing import Callable, Any
class HelperTest(unittest.TestCase):
def setUp(self) -> None:
self.original_stdout = sys.stdout
self.new_stdout = StringIO()
sys.stdout = self.new_stdout
def tearDown(self) -> None:
sys.stdout = self.original_stdout
def test_help_should_list_all_commands_with(self):
# given
routines = create_routines(
Routine("thing", lambda x: print(x)),
Routine("test", lambda x: print(x))
)
# when
helped_routines = Helper.with_help(routines)
# then
self.assertIn("help", helped_routines.keys())
# when
self.run_until_not_callable(helped_routines["help"].body)
result = self.new_stdout.getvalue().strip().split("\n")
# then
assert "thing" in result
assert "test" in result
def test_help_command_should_print_provided_help(self):
# given
test_help = "Some test help"
routines = Helper.with_help(create_routines(
Routine("thing", lambda x: print(x), help=test_help)
))
# when
self.run_until_not_callable(routines["help"].subroutines["thing"].body())
result = self.new_stdout.getvalue().strip().split("\n")
# then
self.assertIn(test_help, result)
def test_help_command_should_print_help_from_method_comments(self):
# given
test_help = "Some test help"
def some_method(*args):
"""
Some test help
:param args:
:return:
"""
print(*args)
return
routines = Helper.with_help(create_routines(
Routine("test_method", some_method)
))
# when
self.run_until_not_callable(routines["help"].subroutines["test_method"].body())
result = self.new_stdout.getvalue().strip()
# then
self.assertIn(test_help, result)
def run_until_not_callable(self, body: Callable) -> Any:
if isinstance(body, Callable):
return self.run_until_not_callable(body())
else:
return body
|
py | 7dfbc75729f081fd6d2e46e87c1b3428593e6fa2 | from templates.text import TextTemplate
from pogoiv.iv_calculator import IvCalculator
def process(input, data ,sender):
calc = IvCalculator()
if data is not None:
pogodata = data.split(' ')
res = calc.get_ivs(pogodata[1].encode("utf-8"), pogodata[2].encode("utf-8"), pogodata[3].encode("utf-8"), pogodata[4].encode("utf-8"), pogodata[5].encode("utf-8"))
else:
res = "Invalid data input!\n"
res += "e.g check \"Chansey\" 285 271 1900 False"
output = {
'input': input,
'output': TextTemplate(str(res)).get_message(),
'success': True
}
return output
|
py | 7dfbc792fc9a1ff96a96e405efaa2288c140f01e | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .script import (
CScript,
OP_CHECKSIG,
OP_DUP,
OP_EQUALVERIFY,
OP_HASH160,
OP_RETURN,
OP_TRUE,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
FromHex,
ToHex,
ser_string,
)
from .txtools import pad_tx
from .util import assert_equal, satoshi_round
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time() + 600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def make_conform_to_ctor(block):
for tx in block.vtx:
tx.rehash()
block.vtx = [block.vtx[0]] + \
sorted(block.vtx[1:], key=lambda tx: tx.get_id())
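# Note: "CTOR" here is the canonical transaction ordering used by Bitcoin
# ABC: every transaction after the coinbase is sorted by its txid.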
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
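# e.g. serialize_script_num(500) -> bytearray(b'\xf4\x01'): little-endian,
# minimal length, with the sign carried in the top bit of the last byte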
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey=None):
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey is not None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
# Make sure the coinbase is at least 100 bytes
pad_tx(coinbase)
coinbase.calc_sha256()
return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"",
amount=1, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
pad_tx(tx)
tx.calc_sha256()
return tx
def create_transaction(node, txid, to_address, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount)
tx = FromHex(CTransaction(), raw_tx)
return tx
def create_raw_transaction(node, txid, to_address, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
inputs = [{"txid": txid, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult['hex']
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the
# moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
def create_confirmed_utxos(node, count, age=101):
"""
Helper to create at least "count" utxos
"""
to_generate = int(0.5 * count) + age
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
outputs[addr1] = satoshi_round(t['amount'] / 2)
outputs[addr2] = satoshi_round(t['amount'] / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
ctx = FromHex(CTransaction(), raw_tx)
fee = node.calculate_fee(ctx) // 2
ctx.vout[0].nValue -= fee
# Due to possible truncation, we go ahead and take another satoshi in
# fees to ensure the transaction gets through
ctx.vout[1].nValue -= fee + 1
signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
def mine_big_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
send_big_transactions(node, utxos, num, 100)
node.generate(1)
def send_big_transactions(node, utxos, num, fee_multiplier):
from .cashaddr import decode
txids = []
padding = "1" * 512
addrHash = decode(node.getnewaddress())[2]
for _ in range(num):
ctx = CTransaction()
utxo = utxos.pop()
txid = int(utxo['txid'], 16)
ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b""))
ctx.vout.append(
CTxOut(int(satoshi_round(utxo['amount'] * COIN)),
CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG])))
for i in range(0, 127):
ctx.vout.append(CTxOut(0, CScript(
[OP_RETURN, bytes(padding, 'utf-8')])))
# Create a proper fee for the transaction to be mined
ctx.vout[0].nValue -= int(fee_multiplier * node.calculate_fee(ctx))
signresult = node.signrawtransactionwithwallet(
ToHex(ctx), None, "NONE|FORKID")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
|